1//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
10//
11//===----------------------------------------------------------------------===//
12
13#include "flang/Optimizer/CodeGen/CodeGen.h"
14
15#include "flang/Optimizer/CodeGen/CodeGenOpenMP.h"
16#include "flang/Optimizer/CodeGen/FIROpPatterns.h"
17#include "flang/Optimizer/CodeGen/LLVMInsertChainFolder.h"
18#include "flang/Optimizer/CodeGen/TypeConverter.h"
19#include "flang/Optimizer/Dialect/FIRAttr.h"
20#include "flang/Optimizer/Dialect/FIRCG/CGOps.h"
21#include "flang/Optimizer/Dialect/FIRDialect.h"
22#include "flang/Optimizer/Dialect/FIROps.h"
23#include "flang/Optimizer/Dialect/FIRType.h"
24#include "flang/Optimizer/Support/DataLayout.h"
25#include "flang/Optimizer/Support/InternalNames.h"
26#include "flang/Optimizer/Support/TypeCode.h"
27#include "flang/Optimizer/Support/Utils.h"
28#include "flang/Runtime/CUDA/descriptor.h"
29#include "flang/Runtime/CUDA/memory.h"
30#include "flang/Runtime/allocator-registry-consts.h"
31#include "flang/Runtime/descriptor-consts.h"
32#include "flang/Semantics/runtime-type-info.h"
33#include "mlir/Conversion/ArithCommon/AttrToLLVMConverter.h"
34#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
35#include "mlir/Conversion/ComplexToLLVM/ComplexToLLVM.h"
36#include "mlir/Conversion/ComplexToStandard/ComplexToStandard.h"
37#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
38#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
39#include "mlir/Conversion/LLVMCommon/Pattern.h"
40#include "mlir/Conversion/MathToFuncs/MathToFuncs.h"
41#include "mlir/Conversion/MathToLLVM/MathToLLVM.h"
42#include "mlir/Conversion/MathToLibm/MathToLibm.h"
43#include "mlir/Conversion/MathToROCDL/MathToROCDL.h"
44#include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
45#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
46#include "mlir/Dialect/Arith/IR/Arith.h"
47#include "mlir/Dialect/DLTI/DLTI.h"
48#include "mlir/Dialect/GPU/IR/GPUDialect.h"
49#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
50#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
51#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
52#include "mlir/Dialect/LLVMIR/Transforms/AddComdats.h"
53#include "mlir/Dialect/OpenACC/OpenACC.h"
54#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
55#include "mlir/IR/BuiltinTypes.h"
56#include "mlir/IR/Matchers.h"
57#include "mlir/Pass/Pass.h"
58#include "mlir/Pass/PassManager.h"
59#include "mlir/Target/LLVMIR/Import.h"
60#include "mlir/Target/LLVMIR/ModuleTranslation.h"
61#include "llvm/ADT/ArrayRef.h"
62#include "llvm/ADT/TypeSwitch.h"
63
64namespace fir {
65#define GEN_PASS_DEF_FIRTOLLVMLOWERING
66#include "flang/Optimizer/CodeGen/CGPasses.h.inc"
67} // namespace fir
68
69#define DEBUG_TYPE "flang-codegen"
70
// TODO: This should really be recovered from the specified target.
// Default alignment used when no target-specific value is available
// (presumably in bytes — confirm against users of this constant).
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h. Used to test descriptor attribute bits in
/// fir.box_isptr / fir.box_isalloc lowerings below.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;
78
79static inline mlir::Type getLlvmPtrType(mlir::MLIRContext *context,
80 unsigned addressSpace = 0) {
81 return mlir::LLVM::LLVMPointerType::get(context, addressSpace);
82}
83
84static inline mlir::Type getI8Type(mlir::MLIRContext *context) {
85 return mlir::IntegerType::get(context, 8);
86}
87
88static mlir::LLVM::ConstantOp
89genConstantIndex(mlir::Location loc, mlir::Type ity,
90 mlir::ConversionPatternRewriter &rewriter,
91 std::int64_t offset) {
92 auto cattr = rewriter.getI64IntegerAttr(offset);
93 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
94}
95
96static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
97 mlir::Block *insertBefore) {
98 assert(insertBefore && "expected valid insertion block");
99 return rewriter.createBlock(insertBefore->getParent(),
100 mlir::Region::iterator(insertBefore));
101}
102
103/// Extract constant from a value that must be the result of one of the
104/// ConstantOp operations.
105static int64_t getConstantIntValue(mlir::Value val) {
106 if (auto constVal = fir::getIntIfConstant(val))
107 return *constVal;
108 fir::emitFatalError(val.getLoc(), "must be a constant");
109}
110
111static unsigned getTypeDescFieldId(mlir::Type ty) {
112 auto isArray = mlir::isa<fir::SequenceType>(fir::dyn_cast_ptrOrBoxEleTy(ty));
113 return isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
114}
115static unsigned getLenParamFieldId(mlir::Type ty) {
116 return getTypeDescFieldId(ty) + 1;
117}
118
119static llvm::SmallVector<mlir::NamedAttribute>
120addLLVMOpBundleAttrs(mlir::ConversionPatternRewriter &rewriter,
121 llvm::ArrayRef<mlir::NamedAttribute> attrs,
122 int32_t numCallOperands) {
123 llvm::SmallVector<mlir::NamedAttribute> newAttrs;
124 newAttrs.reserve(attrs.size() + 2);
125
126 for (mlir::NamedAttribute attr : attrs) {
127 if (attr.getName() != "operandSegmentSizes")
128 newAttrs.push_back(attr);
129 }
130
131 newAttrs.push_back(rewriter.getNamedAttr(
132 "operandSegmentSizes",
133 rewriter.getDenseI32ArrayAttr({numCallOperands, 0})));
134 newAttrs.push_back(rewriter.getNamedAttr("op_bundle_sizes",
135 rewriter.getDenseI32ArrayAttr({})));
136 return newAttrs;
137}
138
139namespace {
140
/// Materialize the address of global `symName` with result type `type`.
/// For LLVM pointer results, the address is taken in the global's address
/// space (`globalAS`); when that differs from the program address space
/// (`programAS`), an addrspacecast to `programAS` is inserted so the value is
/// usable from program code. When `replaceOp` is non-null, the final op
/// replaces it; otherwise ops are created at the current insertion point.
mlir::Value replaceWithAddrOfOrASCast(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc,
                                      std::uint64_t globalAS,
                                      std::uint64_t programAS,
                                      llvm::StringRef symName, mlir::Type type,
                                      mlir::Operation *replaceOp = nullptr) {
  if (mlir::isa<mlir::LLVM::LLVMPointerType>(type)) {
    if (globalAS != programAS) {
      // Take the address in the global's space, then cast into the program's.
      auto llvmAddrOp = rewriter.create<mlir::LLVM::AddressOfOp>(
          loc, getLlvmPtrType(rewriter.getContext(), globalAS), symName);
      if (replaceOp)
        return rewriter.replaceOpWithNewOp<mlir::LLVM::AddrSpaceCastOp>(
            replaceOp, ::getLlvmPtrType(rewriter.getContext(), programAS),
            llvmAddrOp);
      return rewriter.create<mlir::LLVM::AddrSpaceCastOp>(
          loc, getLlvmPtrType(rewriter.getContext(), programAS), llvmAddrOp);
    }

    // Address spaces agree: a plain llvm.mlir.addressof suffices.
    if (replaceOp)
      return rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
          replaceOp, getLlvmPtrType(rewriter.getContext(), globalAS), symName);
    return rewriter.create<mlir::LLVM::AddressOfOp>(
        loc, getLlvmPtrType(rewriter.getContext(), globalAS), symName);
  }

  // Non-pointer result type: take the address with the requested type as-is.
  if (replaceOp)
    return rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(replaceOp, type,
                                                                symName);
  return rewriter.create<mlir::LLVM::AddressOfOp>(loc, type, symName);
}
171
172/// Lower `fir.address_of` operation to `llvm.address_of` operation.
173struct AddrOfOpConversion : public fir::FIROpConversion<fir::AddrOfOp> {
174 using FIROpConversion::FIROpConversion;
175
176 llvm::LogicalResult
177 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
178 mlir::ConversionPatternRewriter &rewriter) const override {
179 auto global = addr->getParentOfType<mlir::ModuleOp>()
180 .lookupSymbol<mlir::LLVM::GlobalOp>(addr.getSymbol());
181 replaceWithAddrOfOrASCast(
182 rewriter, addr->getLoc(),
183 global ? global.getAddrSpace() : getGlobalAddressSpace(rewriter),
184 getProgramAddressSpace(rewriter),
185 global ? global.getSymName()
186 : addr.getSymbol().getRootReference().getValue(),
187 convertType(addr.getType()), addr);
188 return mlir::success();
189 }
190};
191} // namespace
192
193/// Lookup the function to compute the memory size of this parametric derived
194/// type. The size of the object may depend on the LEN type parameters of the
195/// derived type.
196static mlir::LLVM::LLVMFuncOp
197getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
198 mlir::ConversionPatternRewriter &rewriter) {
199 auto module = op->getParentOfType<mlir::ModuleOp>();
200 std::string name = recTy.getName().str() + "P.mem.size";
201 if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name))
202 return memSizeFunc;
203 TODO(op.getLoc(), "did not find allocation function");
204}
205
206// Compute the alloc scale size (constant factors encoded in the array type).
207// We do this for arrays without a constant interior or arrays of character with
208// dynamic length arrays, since those are the only ones that get decayed to a
209// pointer to the element type.
210template <typename OP>
211static mlir::Value
212genAllocationScaleSize(OP op, mlir::Type ity,
213 mlir::ConversionPatternRewriter &rewriter) {
214 mlir::Location loc = op.getLoc();
215 mlir::Type dataTy = op.getInType();
216 auto seqTy = mlir::dyn_cast<fir::SequenceType>(dataTy);
217 fir::SequenceType::Extent constSize = 1;
218 if (seqTy) {
219 int constRows = seqTy.getConstantRows();
220 const fir::SequenceType::ShapeRef &shape = seqTy.getShape();
221 if (constRows != static_cast<int>(shape.size())) {
222 for (auto extent : shape) {
223 if (constRows-- > 0)
224 continue;
225 if (extent != fir::SequenceType::getUnknownExtent())
226 constSize *= extent;
227 }
228 }
229 }
230
231 if (constSize != 1) {
232 mlir::Value constVal{
233 genConstantIndex(loc, ity, rewriter, constSize).getResult()};
234 return constVal;
235 }
236 return nullptr;
237}
238
239namespace {
240struct DeclareOpConversion : public fir::FIROpConversion<fir::cg::XDeclareOp> {
241public:
242 using FIROpConversion::FIROpConversion;
243 llvm::LogicalResult
244 matchAndRewrite(fir::cg::XDeclareOp declareOp, OpAdaptor adaptor,
245 mlir::ConversionPatternRewriter &rewriter) const override {
246 auto memRef = adaptor.getOperands()[0];
247 if (auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(declareOp.getLoc())) {
248 if (auto varAttr =
249 mlir::dyn_cast_or_null<mlir::LLVM::DILocalVariableAttr>(
250 fusedLoc.getMetadata())) {
251 rewriter.create<mlir::LLVM::DbgDeclareOp>(memRef.getLoc(), memRef,
252 varAttr, nullptr);
253 }
254 }
255 rewriter.replaceOp(declareOp, memRef);
256 return mlir::success();
257 }
258};
259} // namespace
260
261namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  // The element count starts at 1 and is scaled by (a) LEN type parameters
  // for character / parametric derived types, (b) constant factors encoded in
  // the array type, and (c) runtime shape operands. Constant-sized allocas
  // are hoisted towards the function entry so LLVM treats them as static.
  llvm::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    // `i` walks `operands`: LEN parameters first, then shape extents.
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type firObjType = fir::unwrapRefType(alloc.getType());
    mlir::Type llvmObjectType = convertObjectType(firObjType);
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = mlir::dyn_cast<fir::CharacterType>(scalarType)) {
        // Dynamic-length character: allocate `len` units of the
        // unknown-length character type of the same kind.
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        llvmObjectType = convertType(rawCharTy);
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0], /*fold=*/true);
      } else if (auto recTy = mlir::dyn_cast<fir::RecordType>(scalarType)) {
        // Parametric derived type: call the generated `...P.mem.size`
        // function and allocate that many i8 units.
        // NOTE(review): getDependentTypeMemSizeFn TODO()s when the function
        // is absent, so the null check below appears unreachable.
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams,
            addLLVMOpBundleAttrs(rewriter, {attr}, lenParams.size()));
        size = call.getResult();
        llvmObjectType = ::getI8Type(alloc.getContext());
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    // Fold in compile-time-known array factors, if any.
    if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
      size =
          rewriter.createOrFold<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    // Multiply in the runtime extents (remaining operands).
    if (alloc.hasShapeOperands()) {
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.createOrFold<mlir::LLVM::MulOp>(
            loc, ity, size,
            integerCast(loc, rewriter, ity, operands[i], /*fold=*/true));
    }

    unsigned allocaAs = getAllocaAddressSpace(rewriter);
    unsigned programAs = getProgramAddressSpace(rewriter);

    if (mlir::isa<mlir::LLVM::ConstantOp>(size.getDefiningOp())) {
      // Set the Block in which the llvm alloca should be inserted.
      mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
      mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
      mlir::Block *insertBlock =
          getBlockForAllocaInsert(parentOp, parentRegion);

      // The old size might have had multiple users, some at a broader scope
      // than we can safely outline the alloca to. As it is only an
      // llvm.constant operation, it is faster to clone it than to calculate the
      // dominance to see if it really should be moved.
      mlir::Operation *clonedSize = rewriter.clone(*size.getDefiningOp());
      size = clonedSize->getResult(0);
      clonedSize->moveBefore(&insertBlock->front());
      rewriter.setInsertionPointAfter(size.getDefiningOp());
    }

    // NOTE: we used to pass alloc->getAttrs() in the builder for non opaque
    // pointers! Only propagate pinned and bindc_name to help debugging, but
    // this should have no functional purpose (and passing the operand segment
    // attribute like before is certainly bad).
    auto llvmAlloc = rewriter.create<mlir::LLVM::AllocaOp>(
        loc, ::getLlvmPtrType(alloc.getContext(), allocaAs), llvmObjectType,
        size);
    if (alloc.getPinned())
      llvmAlloc->setDiscardableAttr(alloc.getPinnedAttrName(),
                                    alloc.getPinnedAttr());
    if (alloc.getBindcName())
      llvmAlloc->setDiscardableAttr(alloc.getBindcNameAttrName(),
                                    alloc.getBindcNameAttr());
    if (allocaAs == programAs) {
      rewriter.replaceOp(alloc, llvmAlloc);
    } else {
      // if our allocation address space, is not the same as the program address
      // space, then we must emit a cast to the program address space before
      // use. An example case would be on AMDGPU, where the allocation address
      // space is the numeric value 5 (private), and the program address space
      // is 0 (generic).
      rewriter.replaceOpWithNewOp<mlir::LLVM::AddrSpaceCastOp>(
          alloc, ::getLlvmPtrType(alloc.getContext(), programAs), llvmAlloc);
    }

    return mlir::success();
  }
};
364} // namespace
365
366namespace {
367/// Lower `fir.box_addr` to the sequence of operations to extract the first
368/// element of the box.
369struct BoxAddrOpConversion : public fir::FIROpConversion<fir::BoxAddrOp> {
370 using FIROpConversion::FIROpConversion;
371
372 llvm::LogicalResult
373 matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
374 mlir::ConversionPatternRewriter &rewriter) const override {
375 mlir::Value a = adaptor.getOperands()[0];
376 auto loc = boxaddr.getLoc();
377 if (auto argty =
378 mlir::dyn_cast<fir::BaseBoxType>(boxaddr.getVal().getType())) {
379 TypePair boxTyPair = getBoxTypePair(argty);
380 rewriter.replaceOp(boxaddr,
381 getBaseAddrFromBox(loc, boxTyPair, a, rewriter));
382 } else {
383 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, a, 0);
384 }
385 return mlir::success();
386 }
387};
388
389/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
390/// boxchar.
391struct BoxCharLenOpConversion : public fir::FIROpConversion<fir::BoxCharLenOp> {
392 using FIROpConversion::FIROpConversion;
393
394 llvm::LogicalResult
395 matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
396 mlir::ConversionPatternRewriter &rewriter) const override {
397 mlir::Value boxChar = adaptor.getOperands()[0];
398 mlir::Location loc = boxChar.getLoc();
399 mlir::Type returnValTy = boxCharLen.getResult().getType();
400
401 constexpr int boxcharLenIdx = 1;
402 auto len = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, boxChar,
403 boxcharLenIdx);
404 mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
405 rewriter.replaceOp(boxCharLen, lenAfterCast);
406
407 return mlir::success();
408 }
409};
410
411/// Lower `fir.box_dims` to a sequence of operations to extract the requested
412/// dimension information from the boxed value.
413/// Result in a triple set of GEPs and loads.
414struct BoxDimsOpConversion : public fir::FIROpConversion<fir::BoxDimsOp> {
415 using FIROpConversion::FIROpConversion;
416
417 llvm::LogicalResult
418 matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
419 mlir::ConversionPatternRewriter &rewriter) const override {
420 llvm::SmallVector<mlir::Type, 3> resultTypes = {
421 convertType(boxdims.getResult(0).getType()),
422 convertType(boxdims.getResult(1).getType()),
423 convertType(boxdims.getResult(2).getType()),
424 };
425 TypePair boxTyPair = getBoxTypePair(boxdims.getVal().getType());
426 auto results = getDimsFromBox(boxdims.getLoc(), resultTypes, boxTyPair,
427 adaptor.getOperands()[0],
428 adaptor.getOperands()[1], rewriter);
429 rewriter.replaceOp(boxdims, results);
430 return mlir::success();
431 }
432};
433
434/// Lower `fir.box_elesize` to a sequence of operations ro extract the size of
435/// an element in the boxed value.
436struct BoxEleSizeOpConversion : public fir::FIROpConversion<fir::BoxEleSizeOp> {
437 using FIROpConversion::FIROpConversion;
438
439 llvm::LogicalResult
440 matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
441 mlir::ConversionPatternRewriter &rewriter) const override {
442 mlir::Value box = adaptor.getOperands()[0];
443 auto loc = boxelesz.getLoc();
444 auto ty = convertType(boxelesz.getType());
445 TypePair boxTyPair = getBoxTypePair(boxelesz.getVal().getType());
446 auto elemSize = getElementSizeFromBox(loc, ty, boxTyPair, box, rewriter);
447 rewriter.replaceOp(boxelesz, elemSize);
448 return mlir::success();
449 }
450};
451
452/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
453/// boxed value was from an ALLOCATABLE entity.
454struct BoxIsAllocOpConversion : public fir::FIROpConversion<fir::BoxIsAllocOp> {
455 using FIROpConversion::FIROpConversion;
456
457 llvm::LogicalResult
458 matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
459 mlir::ConversionPatternRewriter &rewriter) const override {
460 mlir::Value box = adaptor.getOperands()[0];
461 auto loc = boxisalloc.getLoc();
462 TypePair boxTyPair = getBoxTypePair(boxisalloc.getVal().getType());
463 mlir::Value check =
464 genBoxAttributeCheck(loc, boxTyPair, box, rewriter, kAttrAllocatable);
465 rewriter.replaceOp(boxisalloc, check);
466 return mlir::success();
467 }
468};
469
470/// Lower `fir.box_isarray` to a sequence of operations to determine if the
471/// boxed is an array.
472struct BoxIsArrayOpConversion : public fir::FIROpConversion<fir::BoxIsArrayOp> {
473 using FIROpConversion::FIROpConversion;
474
475 llvm::LogicalResult
476 matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
477 mlir::ConversionPatternRewriter &rewriter) const override {
478 mlir::Value a = adaptor.getOperands()[0];
479 auto loc = boxisarray.getLoc();
480 TypePair boxTyPair = getBoxTypePair(boxisarray.getVal().getType());
481 mlir::Value rank = getRankFromBox(loc, boxTyPair, a, rewriter);
482 mlir::Value c0 = genConstantIndex(loc, rank.getType(), rewriter, 0);
483 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
484 boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
485 return mlir::success();
486 }
487};
488
489/// Lower `fir.box_isptr` to a sequence of operations to determined if the
490/// boxed value was from a POINTER entity.
491struct BoxIsPtrOpConversion : public fir::FIROpConversion<fir::BoxIsPtrOp> {
492 using FIROpConversion::FIROpConversion;
493
494 llvm::LogicalResult
495 matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
496 mlir::ConversionPatternRewriter &rewriter) const override {
497 mlir::Value box = adaptor.getOperands()[0];
498 auto loc = boxisptr.getLoc();
499 TypePair boxTyPair = getBoxTypePair(boxisptr.getVal().getType());
500 mlir::Value check =
501 genBoxAttributeCheck(loc, boxTyPair, box, rewriter, kAttrPointer);
502 rewriter.replaceOp(boxisptr, check);
503 return mlir::success();
504 }
505};
506
507/// Lower `fir.box_rank` to the sequence of operation to extract the rank from
508/// the box.
509struct BoxRankOpConversion : public fir::FIROpConversion<fir::BoxRankOp> {
510 using FIROpConversion::FIROpConversion;
511
512 llvm::LogicalResult
513 matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
514 mlir::ConversionPatternRewriter &rewriter) const override {
515 mlir::Value a = adaptor.getOperands()[0];
516 auto loc = boxrank.getLoc();
517 mlir::Type ty = convertType(boxrank.getType());
518 TypePair boxTyPair =
519 getBoxTypePair(fir::unwrapRefType(boxrank.getBox().getType()));
520 mlir::Value rank = getRankFromBox(loc, boxTyPair, a, rewriter);
521 mlir::Value result = integerCast(loc, rewriter, ty, rank);
522 rewriter.replaceOp(boxrank, result);
523 return mlir::success();
524 }
525};
526
/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion
    : public fir::FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Not implemented: TODO() reports a "not yet implemented" error, so the
    // failure return below is effectively unreachable.
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return mlir::failure();
  }
};
541
542/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
543/// descriptor from the box.
544struct BoxTypeDescOpConversion
545 : public fir::FIROpConversion<fir::BoxTypeDescOp> {
546 using FIROpConversion::FIROpConversion;
547
548 llvm::LogicalResult
549 matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
550 mlir::ConversionPatternRewriter &rewriter) const override {
551 mlir::Value box = adaptor.getOperands()[0];
552 TypePair boxTyPair = getBoxTypePair(boxtypedesc.getBox().getType());
553 auto typeDescAddr =
554 loadTypeDescAddress(boxtypedesc.getLoc(), boxTyPair, box, rewriter);
555 rewriter.replaceOp(boxtypedesc, typeDescAddr);
556 return mlir::success();
557 }
558};
559
560/// Lower `fir.box_typecode` to a sequence of operations to extract the type
561/// code in the boxed value.
562struct BoxTypeCodeOpConversion
563 : public fir::FIROpConversion<fir::BoxTypeCodeOp> {
564 using FIROpConversion::FIROpConversion;
565
566 llvm::LogicalResult
567 matchAndRewrite(fir::BoxTypeCodeOp op, OpAdaptor adaptor,
568 mlir::ConversionPatternRewriter &rewriter) const override {
569 mlir::Value box = adaptor.getOperands()[0];
570 auto loc = box.getLoc();
571 auto ty = convertType(op.getType());
572 TypePair boxTyPair = getBoxTypePair(op.getBox().getType());
573 auto typeCode =
574 getValueFromBox(loc, boxTyPair, box, ty, rewriter, kTypePosInBox);
575 rewriter.replaceOp(op, typeCode);
576 return mlir::success();
577 }
578};
579
/// Lower `fir.string_lit` to LLVM IR dialect operation.
struct StringLitOpConversion : public fir::FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    // A string attribute maps directly onto an LLVM constant.
    if (mlir::isa<mlir::StringAttr>(attr)) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = mlir::cast<fir::CharacterType>(constop.getType());
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = mlir::dyn_cast<mlir::DenseElementsAttr>(attr)) {
      // Dense element storage becomes a single aggregate constant.
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = mlir::dyn_cast<mlir::ArrayAttr>(attr)) {
      // Array attribute: insert characters one by one into the undef value.
      for (auto a : llvm::enumerate(arr.getValue())) {
        // convert each character to a precise bitsize
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            mlir::cast<mlir::IntegerAttr>(a.value()).getValue().zextOrTrunc(
                bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, cst, elemCst,
                                                         a.index());
      }
    } else {
      // Unsupported attribute kind for a string literal.
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};
620
/// `fir.call` -> `llvm.call`
struct CallOpConversion : public fir::FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type> resultTys;
    // Stash the FIR memory-effects attribute (if any) so it can be
    // re-applied to the llvm.call below; it must not remain on the FIR op.
    mlir::Attribute memAttr =
        call->getAttr(fir::FIROpsDialect::getFirCallMemoryAttrName());
    if (memAttr)
      call->removeAttr(fir::FIROpsDialect::getFirCallMemoryAttrName());

    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    // Convert arith::FastMathFlagsAttr to LLVM::FastMathFlagsAttr.
    mlir::arith::AttrConvertFastMathToLLVM<fir::CallOp, mlir::LLVM::CallOp>
        attrConvert(call);
    auto llvmCall = rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(),
        addLLVMOpBundleAttrs(rewriter, attrConvert.getAttrs(),
                             adaptor.getOperands().size()));
    if (mlir::ArrayAttr argAttrsArray = call.getArgAttrsAttr()) {
      // sret and byval type needs to be converted.
      auto convertTypeAttr = [&](const mlir::NamedAttribute &attr) {
        return mlir::TypeAttr::get(convertType(
            llvm::cast<mlir::TypeAttr>(attr.getValue()).getValue()));
      };
      llvm::SmallVector<mlir::Attribute> newArgAttrsArray;
      for (auto argAttrs : argAttrsArray) {
        llvm::SmallVector<mlir::NamedAttribute> convertedAttrs;
        for (const mlir::NamedAttribute &attr :
             llvm::cast<mlir::DictionaryAttr>(argAttrs)) {
          if (attr.getName().getValue() ==
              mlir::LLVM::LLVMDialect::getByValAttrName()) {
            convertedAttrs.push_back(rewriter.getNamedAttr(
                mlir::LLVM::LLVMDialect::getByValAttrName(),
                convertTypeAttr(attr)));
          } else if (attr.getName().getValue() ==
                     mlir::LLVM::LLVMDialect::getStructRetAttrName()) {
            convertedAttrs.push_back(rewriter.getNamedAttr(
                mlir::LLVM::LLVMDialect::getStructRetAttrName(),
                convertTypeAttr(attr)));
          } else {
            // Other per-argument attributes carry over unchanged.
            convertedAttrs.push_back(attr);
          }
        }
        newArgAttrsArray.emplace_back(
            mlir::DictionaryAttr::get(rewriter.getContext(), convertedAttrs));
      }
      llvmCall.setArgAttrsAttr(rewriter.getArrayAttr(newArgAttrsArray));
    }
    // Result attributes need no type conversion; forward as-is.
    if (mlir::ArrayAttr resAttrs = call.getResAttrsAttr())
      llvmCall.setResAttrsAttr(resAttrs);

    if (memAttr)
      llvmCall.setMemoryEffectsAttr(
          mlir::cast<mlir::LLVM::MemoryEffectsAttr>(memAttr));
    return mlir::success();
  }
};
682} // namespace
683
684static mlir::Type getComplexEleTy(mlir::Type complex) {
685 return mlir::cast<mlir::ComplexType>(complex).getElementType();
686}
687
688namespace {
689/// Compare complex values
690///
691/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
692///
693/// For completeness, all other comparison are done on the real component only.
694struct CmpcOpConversion : public fir::FIROpConversion<fir::CmpcOp> {
695 using FIROpConversion::FIROpConversion;
696
697 llvm::LogicalResult
698 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
699 mlir::ConversionPatternRewriter &rewriter) const override {
700 mlir::ValueRange operands = adaptor.getOperands();
701 mlir::Type resTy = convertType(cmp.getType());
702 mlir::Location loc = cmp.getLoc();
703 mlir::LLVM::FastmathFlags fmf =
704 mlir::arith::convertArithFastMathFlagsToLLVM(cmp.getFastmath());
705 mlir::LLVM::FCmpPredicate pred =
706 static_cast<mlir::LLVM::FCmpPredicate>(cmp.getPredicate());
707 auto rcp = rewriter.create<mlir::LLVM::FCmpOp>(
708 loc, resTy, pred,
709 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, operands[0], 0),
710 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, operands[1], 0), fmf);
711 auto icp = rewriter.create<mlir::LLVM::FCmpOp>(
712 loc, resTy, pred,
713 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, operands[0], 1),
714 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, operands[1], 1), fmf);
715 llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp};
716 switch (cmp.getPredicate()) {
717 case mlir::arith::CmpFPredicate::OEQ: // .EQ.
718 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
719 break;
720 case mlir::arith::CmpFPredicate::UNE: // .NE.
721 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
722 break;
723 default:
724 rewriter.replaceOp(cmp, rcp.getResult());
725 break;
726 }
727 return mlir::success();
728 }
729};
730
731/// fir.volatile_cast is only useful at the fir level. Once we lower to LLVM,
732/// volatility is described by setting volatile attributes on the LLVM ops.
733struct VolatileCastOpConversion
734 : public fir::FIROpConversion<fir::VolatileCastOp> {
735 using FIROpConversion::FIROpConversion;
736
737 llvm::LogicalResult
738 matchAndRewrite(fir::VolatileCastOp volatileCast, OpAdaptor adaptor,
739 mlir::ConversionPatternRewriter &rewriter) const override {
740 rewriter.replaceOp(volatileCast, adaptor.getOperands()[0]);
741 return mlir::success();
742 }
743};
744
745/// convert value of from-type to value of to-type
746struct ConvertOpConversion : public fir::FIROpConversion<fir::ConvertOp> {
747 using FIROpConversion::FIROpConversion;
748
749 static bool isFloatingPointTy(mlir::Type ty) {
750 return mlir::isa<mlir::FloatType>(ty);
751 }
752
753 llvm::LogicalResult
754 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
755 mlir::ConversionPatternRewriter &rewriter) const override {
756 auto fromFirTy = convert.getValue().getType();
757 auto toFirTy = convert.getRes().getType();
758 auto fromTy = convertType(fromFirTy);
759 auto toTy = convertType(toFirTy);
760 mlir::Value op0 = adaptor.getOperands()[0];
761
762 if (fromFirTy == toFirTy) {
763 rewriter.replaceOp(convert, op0);
764 return mlir::success();
765 }
766
767 auto loc = convert.getLoc();
768 auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
769
770 if (mlir::isa<fir::RecordType>(toFirTy)) {
771 // Convert to compatible BIND(C) record type.
772 // Double check that the record types are compatible (it should have
773 // already been checked by the verifier).
774 assert(mlir::cast<fir::RecordType>(fromFirTy).getTypeList() ==
775 mlir::cast<fir::RecordType>(toFirTy).getTypeList() &&
776 "incompatible record types");
777
778 auto toStTy = mlir::cast<mlir::LLVM::LLVMStructType>(toTy);
779 mlir::Value val = rewriter.create<mlir::LLVM::UndefOp>(loc, toStTy);
780 auto indexTypeMap = toStTy.getSubelementIndexMap();
781 assert(indexTypeMap.has_value() && "invalid record type");
782
783 for (auto [attr, type] : indexTypeMap.value()) {
784 int64_t index = mlir::cast<mlir::IntegerAttr>(attr).getInt();
785 auto extVal =
786 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, op0, index);
787 val =
788 rewriter.create<mlir::LLVM::InsertValueOp>(loc, val, extVal, index);
789 }
790
791 rewriter.replaceOp(convert, val);
792 return mlir::success();
793 }
794
795 if (mlir::isa<fir::LogicalType>(fromFirTy) ||
796 mlir::isa<fir::LogicalType>(toFirTy)) {
797 // By specification fir::LogicalType value may be any number,
798 // where non-zero value represents .true. and zero value represents
799 // .false.
800 //
801 // integer<->logical conversion requires value normalization.
802 // Conversion from wide logical to narrow logical must set the result
803 // to non-zero iff the input is non-zero - the easiest way to implement
804 // it is to compare the input agains zero and set the result to
805 // the canonical 0/1.
806 // Conversion from narrow logical to wide logical may be implemented
807 // as a zero or sign extension of the input, but it may use value
808 // normalization as well.
809 if (!mlir::isa<mlir::IntegerType>(fromTy) ||
810 !mlir::isa<mlir::IntegerType>(toTy))
811 return mlir::emitError(loc)
812 << "unsupported types for logical conversion: " << fromTy
813 << " -> " << toTy;
814
815 // Do folding for constant inputs.
816 if (auto constVal = fir::getIntIfConstant(op0)) {
817 mlir::Value normVal =
818 genConstantIndex(loc, toTy, rewriter, *constVal ? 1 : 0);
819 rewriter.replaceOp(convert, normVal);
820 return mlir::success();
821 }
822
823 // If the input is i1, then we can just zero extend it, and
824 // the result will be normalized.
825 if (fromTy == i1Type) {
826 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
827 return mlir::success();
828 }
829
830 // Compare the input with zero.
831 mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
832 auto isTrue = rewriter.create<mlir::LLVM::ICmpOp>(
833 loc, mlir::LLVM::ICmpPredicate::ne, op0, zero);
834
835 // Zero extend the i1 isTrue result to the required type (unless it is i1
836 // itself).
837 if (toTy != i1Type)
838 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, isTrue);
839 else
840 rewriter.replaceOp(convert, isTrue.getResult());
841
842 return mlir::success();
843 }
844
845 if (fromTy == toTy) {
846 rewriter.replaceOp(convert, op0);
847 return mlir::success();
848 }
849 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
850 unsigned toBits, mlir::Type toTy) -> mlir::Value {
851 if (fromBits == toBits) {
852 // TODO: Converting between two floating-point representations with the
853 // same bitwidth is not allowed for now.
854 mlir::emitError(loc,
855 "cannot implicitly convert between two floating-point "
856 "representations of the same bitwidth");
857 return {};
858 }
859 if (fromBits > toBits)
860 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
861 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
862 };
863 // Complex to complex conversion.
864 if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
865 // Special case: handle the conversion of a complex such that both the
866 // real and imaginary parts are converted together.
867 auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
868 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, op0, 0);
869 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, op0, 1);
870 auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
871 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
872 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
873 auto rc = convertFpToFp(rp, fromBits, toBits, nt);
874 auto ic = convertFpToFp(ip, fromBits, toBits, nt);
875 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
876 auto i1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, un, rc, 0);
877 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, i1, ic,
878 1);
879 return mlir::success();
880 }
881
882 // Floating point to floating point conversion.
883 if (isFloatingPointTy(fromTy)) {
884 if (isFloatingPointTy(toTy)) {
885 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
886 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
887 auto v = convertFpToFp(op0, fromBits, toBits, toTy);
888 rewriter.replaceOp(convert, v);
889 return mlir::success();
890 }
891 if (mlir::isa<mlir::IntegerType>(toTy)) {
892 // NOTE: We are checking the fir type here because toTy is an LLVM type
893 // which is signless, and we need to use the intrinsic that matches the
894 // sign of the output in fir.
895 if (toFirTy.isUnsignedInteger()) {
896 auto intrinsicName =
897 mlir::StringAttr::get(convert.getContext(), "llvm.fptoui.sat");
898 rewriter.replaceOpWithNewOp<mlir::LLVM::CallIntrinsicOp>(
899 convert, toTy, intrinsicName, op0);
900 } else {
901 auto intrinsicName =
902 mlir::StringAttr::get(convert.getContext(), "llvm.fptosi.sat");
903 rewriter.replaceOpWithNewOp<mlir::LLVM::CallIntrinsicOp>(
904 convert, toTy, intrinsicName, op0);
905 }
906 return mlir::success();
907 }
908 } else if (mlir::isa<mlir::IntegerType>(fromTy)) {
909 // Integer to integer conversion.
910 if (mlir::isa<mlir::IntegerType>(toTy)) {
911 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
912 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
913 assert(fromBits != toBits);
914 if (fromBits > toBits) {
915 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
916 return mlir::success();
917 }
918 if (fromFirTy == i1Type || fromFirTy.isUnsignedInteger()) {
919 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
920 return mlir::success();
921 }
922 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
923 return mlir::success();
924 }
925 // Integer to floating point conversion.
926 if (isFloatingPointTy(toTy)) {
927 if (fromTy.isUnsignedInteger())
928 rewriter.replaceOpWithNewOp<mlir::LLVM::UIToFPOp>(convert, toTy, op0);
929 else
930 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
931 return mlir::success();
932 }
933 // Integer to pointer conversion.
934 if (mlir::isa<mlir::LLVM::LLVMPointerType>(toTy)) {
935 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
936 return mlir::success();
937 }
938 } else if (mlir::isa<mlir::LLVM::LLVMPointerType>(fromTy)) {
939 // Pointer to integer conversion.
940 if (mlir::isa<mlir::IntegerType>(toTy)) {
941 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
942 return mlir::success();
943 }
944 // Pointer to pointer conversion.
945 if (mlir::isa<mlir::LLVM::LLVMPointerType>(toTy)) {
946 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
947 return mlir::success();
948 }
949 }
950 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
951 }
952};
953
/// `fir.type_info` operation has no specific CodeGen. The operation is
/// only used to carry information during FIR to FIR passes. It may be used
/// in the future to generate the runtime type info data structures instead
/// of generating them in lowering.
struct TypeInfoOpConversion : public fir::FIROpConversion<fir::TypeInfoOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::TypeInfoOp op, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Nothing to lower: simply drop the operation from the IR.
    rewriter.eraseOp(op);
    return mlir::success();
  }
};
968
/// `fir.dt_entry` operation has no specific CodeGen. The operation is only used
/// to carry information during FIR to FIR passes.
struct DTEntryOpConversion : public fir::FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::DTEntryOp op, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Nothing to lower: simply drop the operation from the IR.
    rewriter.eraseOp(op);
    return mlir::success();
  }
};
981
/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public fir::FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Not implemented yet; TODO reports a "not yet implemented" diagnostic.
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return mlir::failure();
  }
};
993
/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public fir::FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  llvm::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Not implemented yet; TODO reports a "not yet implemented" diagnostic.
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};
1006
1007/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
1008/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
1009/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
1010/// element is the length of the character buffer (`#n`).
1011struct EmboxCharOpConversion : public fir::FIROpConversion<fir::EmboxCharOp> {
1012 using FIROpConversion::FIROpConversion;
1013
1014 llvm::LogicalResult
1015 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
1016 mlir::ConversionPatternRewriter &rewriter) const override {
1017 mlir::ValueRange operands = adaptor.getOperands();
1018
1019 mlir::Value charBuffer = operands[0];
1020 mlir::Value charBufferLen = operands[1];
1021
1022 mlir::Location loc = emboxChar.getLoc();
1023 mlir::Type llvmStructTy = convertType(emboxChar.getType());
1024 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);
1025
1026 mlir::Type lenTy =
1027 mlir::cast<mlir::LLVM::LLVMStructType>(llvmStructTy).getBody()[1];
1028 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);
1029
1030 mlir::Type addrTy =
1031 mlir::cast<mlir::LLVM::LLVMStructType>(llvmStructTy).getBody()[0];
1032 if (addrTy != charBuffer.getType())
1033 charBuffer =
1034 rewriter.create<mlir::LLVM::BitcastOp>(loc, addrTy, charBuffer);
1035
1036 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
1037 loc, llvmStruct, charBuffer, 0);
1038 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
1039 emboxChar, insertBufferOp, lenAfterCast, 1);
1040
1041 return mlir::success();
1042 }
1043};
1044} // namespace
1045
1046template <typename ModuleOp>
1047static mlir::SymbolRefAttr
1048getMallocInModule(ModuleOp mod, fir::AllocMemOp op,
1049 mlir::ConversionPatternRewriter &rewriter,
1050 mlir::Type indexType) {
1051 static constexpr char mallocName[] = "malloc";
1052 if (auto mallocFunc =
1053 mod.template lookupSymbol<mlir::LLVM::LLVMFuncOp>(mallocName))
1054 return mlir::SymbolRefAttr::get(mallocFunc);
1055 if (auto userMalloc =
1056 mod.template lookupSymbol<mlir::func::FuncOp>(mallocName))
1057 return mlir::SymbolRefAttr::get(userMalloc);
1058
1059 mlir::OpBuilder moduleBuilder(mod.getBodyRegion());
1060 auto mallocDecl = moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
1061 op.getLoc(), mallocName,
1062 mlir::LLVM::LLVMFunctionType::get(getLlvmPtrType(op.getContext()),
1063 indexType,
1064 /*isVarArg=*/false));
1065 return mlir::SymbolRefAttr::get(mallocDecl);
1066}
1067
1068/// Return the LLVMFuncOp corresponding to the standard malloc call.
1069static mlir::SymbolRefAttr getMalloc(fir::AllocMemOp op,
1070 mlir::ConversionPatternRewriter &rewriter,
1071 mlir::Type indexType) {
1072 if (auto mod = op->getParentOfType<mlir::gpu::GPUModuleOp>())
1073 return getMallocInModule(mod, op, rewriter, indexType);
1074 auto mod = op->getParentOfType<mlir::ModuleOp>();
1075 return getMallocInModule(mod, op, rewriter, indexType);
1076}
1077
1078/// Helper function for generating the LLVM IR that computes the distance
1079/// in bytes between adjacent elements pointed to by a pointer
1080/// of type \p ptrTy. The result is returned as a value of \p idxTy integer
1081/// type.
1082static mlir::Value
1083computeElementDistance(mlir::Location loc, mlir::Type llvmObjectType,
1084 mlir::Type idxTy,
1085 mlir::ConversionPatternRewriter &rewriter,
1086 const mlir::DataLayout &dataLayout) {
1087 llvm::TypeSize size = dataLayout.getTypeSize(llvmObjectType);
1088 unsigned short alignment = dataLayout.getTypeABIAlignment(llvmObjectType);
1089 std::int64_t distance = llvm::alignTo(size, alignment);
1090 return genConstantIndex(loc, idxTy, rewriter, distance);
1091}
1092
/// Return value of the stride in bytes between adjacent elements
/// of LLVM type \p llTy. The result is returned as a value of
/// \p idxTy integer type.
static mlir::Value
genTypeStrideInBytes(mlir::Location loc, mlir::Type idxTy,
                     mlir::ConversionPatternRewriter &rewriter, mlir::Type llTy,
                     const mlir::DataLayout &dataLayout) {
  // Delegate to computeElementDistance(): size rounded up to ABI alignment.
  return computeElementDistance(loc, llTy, idxTy, rewriter, dataLayout);
}
1103
1104namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public fir::FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type heapTy = heap.getType();
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    mlir::Type dataTy = fir::unwrapRefType(heapTy);
    mlir::Type llvmObjectTy = convertObjectType(dataTy);
    if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
      TODO(loc, "fir.allocmem codegen of derived type with length parameters");
    // Total byte count = element size * optional scale * each operand
    // (shape extent / length parameter) of the allocmem.
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, llvmObjectTy);
    if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    // malloc's parameter uses the index bitwidth from the type converter,
    // which may differ from the lowered index type used above; cast if so.
    auto mallocTyWidth = lowerTy().getIndexTypeBitwidth();
    auto mallocTy =
        mlir::IntegerType::get(rewriter.getContext(), mallocTyWidth);
    if (mallocTyWidth != ity.getIntOrFloatBitWidth())
      size = integerCast(loc, rewriter, mallocTy, size);
    // Set "callee" on the op first: the call below copies heap->getAttrs(),
    // so the attribute must be in place before the copy.
    heap->setAttr("callee", getMalloc(heap, rewriter, mallocTy));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        heap, ::getLlvmPtrType(heap.getContext()), size,
        addLLVMOpBundleAttrs(rewriter, heap->getAttrs(), 1));
    return mlir::success();
  }

  /// Compute the allocation size in bytes of the element type of
  /// \p llTy pointer type. The result is returned as a value of \p idxTy
  /// integer type.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    return computeElementDistance(loc, llTy, idxTy, rewriter, getDataLayout());
  }
};
1146} // namespace
1147
1148/// Return the LLVMFuncOp corresponding to the standard free call.
1149template <typename ModuleOp>
1150static mlir::SymbolRefAttr
1151getFreeInModule(ModuleOp mod, fir::FreeMemOp op,
1152 mlir::ConversionPatternRewriter &rewriter) {
1153 static constexpr char freeName[] = "free";
1154 // Check if free already defined in the module.
1155 if (auto freeFunc =
1156 mod.template lookupSymbol<mlir::LLVM::LLVMFuncOp>(freeName))
1157 return mlir::SymbolRefAttr::get(freeFunc);
1158 if (auto freeDefinedByUser =
1159 mod.template lookupSymbol<mlir::func::FuncOp>(freeName))
1160 return mlir::SymbolRefAttr::get(freeDefinedByUser);
1161 // Create llvm declaration for free.
1162 mlir::OpBuilder moduleBuilder(mod.getBodyRegion());
1163 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
1164 auto freeDecl = moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
1165 rewriter.getUnknownLoc(), freeName,
1166 mlir::LLVM::LLVMFunctionType::get(voidType,
1167 getLlvmPtrType(op.getContext()),
1168 /*isVarArg=*/false));
1169 return mlir::SymbolRefAttr::get(freeDecl);
1170}
1171
1172static mlir::SymbolRefAttr getFree(fir::FreeMemOp op,
1173 mlir::ConversionPatternRewriter &rewriter) {
1174 if (auto mod = op->getParentOfType<mlir::gpu::GPUModuleOp>())
1175 return getFreeInModule(mod, op, rewriter);
1176 auto mod = op->getParentOfType<mlir::ModuleOp>();
1177 return getFreeInModule(mod, op, rewriter);
1178}
1179
1180static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) {
1181 unsigned result = 1;
1182 for (auto eleTy =
1183 mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(ty.getElementType());
1184 eleTy; eleTy = mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(
1185 eleTy.getElementType()))
1186 ++result;
1187 return result;
1188}
1189
1190namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public fir::FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = freemem.getLoc();
    // Set "callee" on the op first: the call below copies
    // freemem->getAttrs(), so the attribute must be in place before the copy.
    freemem->setAttr("callee", getFree(freemem, rewriter));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{adaptor.getHeapref()},
        addLLVMOpBundleAttrs(rewriter, freemem->getAttrs(), 1));
    rewriter.eraseOp(freemem);
    return mlir::success();
  }
};
1207} // namespace
1208
1209// Convert subcomponent array indices from column-major to row-major ordering.
1210static llvm::SmallVector<mlir::Value>
1211convertSubcomponentIndices(mlir::Location loc, mlir::Type eleTy,
1212 mlir::ValueRange indices,
1213 mlir::Type *retTy = nullptr) {
1214 llvm::SmallVector<mlir::Value> result;
1215 llvm::SmallVector<mlir::Value> arrayIndices;
1216
1217 auto appendArrayIndices = [&] {
1218 if (arrayIndices.empty())
1219 return;
1220 std::reverse(arrayIndices.begin(), arrayIndices.end());
1221 result.append(arrayIndices.begin(), arrayIndices.end());
1222 arrayIndices.clear();
1223 };
1224
1225 for (mlir::Value index : indices) {
1226 // Component indices can be field index to select a component, or array
1227 // index, to select an element in an array component.
1228 if (auto structTy = mlir::dyn_cast<mlir::LLVM::LLVMStructType>(eleTy)) {
1229 std::int64_t cstIndex = getConstantIntValue(index);
1230 assert(cstIndex < (int64_t)structTy.getBody().size() &&
1231 "out-of-bounds struct field index");
1232 eleTy = structTy.getBody()[cstIndex];
1233 appendArrayIndices();
1234 result.push_back(index);
1235 } else if (auto arrayTy =
1236 mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(eleTy)) {
1237 eleTy = arrayTy.getElementType();
1238 arrayIndices.push_back(index);
1239 } else
1240 fir::emitFatalError(loc, "Unexpected subcomponent type");
1241 }
1242 appendArrayIndices();
1243 if (retTy)
1244 *retTy = eleTy;
1245 return result;
1246}
1247
/// Return a pointer to a NUL-terminated string holding the source file name
/// of \p loc, creating (or reusing) a link-once constant global for it.
/// Returns a null pointer when \p loc carries no file information.
static mlir::Value genSourceFile(mlir::Location loc, mlir::ModuleOp mod,
                                 mlir::ConversionPatternRewriter &rewriter) {
  auto ptrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
  if (auto flc = mlir::dyn_cast<mlir::FileLineColLoc>(loc)) {
    // Include the trailing NUL in the global's contents.
    auto fn = flc.getFilename().str() + '\0';
    std::string globalName = fir::factory::uniqueCGIdent("cl", fn);

    // Reuse an existing global, whether still in FIR form or already
    // translated to the LLVM dialect.
    if (auto g = mod.lookupSymbol<fir::GlobalOp>(globalName)) {
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ptrTy, g.getName());
    } else if (auto g = mod.lookupSymbol<mlir::LLVM::GlobalOp>(globalName)) {
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ptrTy, g.getName());
    }

    // Create the constant global at the end of the module, then restore the
    // insertion point so codegen continues at the original position.
    auto crtInsPt = rewriter.saveInsertionPoint();
    rewriter.setInsertionPoint(mod.getBody(), mod.getBody()->end());
    auto arrayTy = mlir::LLVM::LLVMArrayType::get(
        mlir::IntegerType::get(rewriter.getContext(), 8), fn.size());
    mlir::LLVM::GlobalOp globalOp = rewriter.create<mlir::LLVM::GlobalOp>(
        loc, arrayTy, /*constant=*/true, mlir::LLVM::Linkage::Linkonce,
        globalName, mlir::Attribute());

    // Initialize the global with the file name bytes via an initializer
    // region returning the constant array.
    mlir::Region &region = globalOp.getInitializerRegion();
    mlir::Block *block = rewriter.createBlock(&region);
    rewriter.setInsertionPoint(block, block->begin());
    mlir::Value constValue = rewriter.create<mlir::LLVM::ConstantOp>(
        loc, arrayTy, rewriter.getStringAttr(fn));
    rewriter.create<mlir::LLVM::ReturnOp>(loc, constValue);
    rewriter.restoreInsertionPoint(crtInsPt);
    return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ptrTy,
                                                    globalOp.getName());
  }
  return rewriter.create<mlir::LLVM::ZeroOp>(loc, ptrTy);
}
1281
1282static mlir::Value genSourceLine(mlir::Location loc,
1283 mlir::ConversionPatternRewriter &rewriter) {
1284 if (auto flc = mlir::dyn_cast<mlir::FileLineColLoc>(loc))
1285 return rewriter.create<mlir::LLVM::ConstantOp>(loc, rewriter.getI32Type(),
1286 flc.getLine());
1287 return rewriter.create<mlir::LLVM::ConstantOp>(loc, rewriter.getI32Type(), 0);
1288}
1289
/// Generate a call to the CUF runtime to allocate a descriptor for box type
/// \p boxTy on the device, passing the descriptor byte size and source
/// location information. Declares the runtime entry point if needed.
static mlir::Value
genCUFAllocDescriptor(mlir::Location loc,
                      mlir::ConversionPatternRewriter &rewriter,
                      mlir::ModuleOp mod, fir::BaseBoxType boxTy,
                      const fir::LLVMTypeConverter &typeConverter) {
  // The data layout is required to size the descriptor struct below.
  std::optional<mlir::DataLayout> dl =
      fir::support::getOrSetMLIRDataLayout(mod, /*allowDefaultLayout=*/true);
  if (!dl)
    mlir::emitError(mod.getLoc(),
                    "module operation must carry a data layout attribute "
                    "to generate llvm IR from FIR");

  mlir::Value sourceFile = genSourceFile(loc, mod, rewriter);
  mlir::Value sourceLine = genSourceLine(loc, rewriter);

  mlir::MLIRContext *ctx = mod.getContext();

  // Runtime signature: ptr CUFAllocDescriptor(intptr, ptr, i32).
  mlir::LLVM::LLVMPointerType llvmPointerType =
      mlir::LLVM::LLVMPointerType::get(ctx);
  mlir::Type llvmInt32Type = mlir::IntegerType::get(ctx, 32);
  mlir::Type llvmIntPtrType =
      mlir::IntegerType::get(ctx, typeConverter.getPointerBitwidth(0));
  auto fctTy = mlir::LLVM::LLVMFunctionType::get(
      llvmPointerType, {llvmIntPtrType, llvmPointerType, llvmInt32Type});

  // Declare the runtime function unless some declaration already exists.
  auto llvmFunc = mod.lookupSymbol<mlir::LLVM::LLVMFuncOp>(
      RTNAME_STRING(CUFAllocDescriptor));
  auto funcFunc =
      mod.lookupSymbol<mlir::func::FuncOp>(RTNAME_STRING(CUFAllocDescriptor));
  if (!llvmFunc && !funcFunc)
    mlir::OpBuilder::atBlockEnd(mod.getBody())
        .create<mlir::LLVM::LLVMFuncOp>(loc, RTNAME_STRING(CUFAllocDescriptor),
                                        fctTy);

  // Size of the descriptor as laid out in memory, in bytes.
  mlir::Type structTy = typeConverter.convertBoxTypeAsStruct(boxTy);
  std::size_t boxSize = dl->getTypeSizeInBits(structTy) / 8;
  mlir::Value sizeInBytes =
      genConstantIndex(loc, llvmIntPtrType, rewriter, boxSize);
  llvm::SmallVector args = {sizeInBytes, sourceFile, sourceLine};
  return rewriter
      .create<mlir::LLVM::CallOp>(loc, fctTy, RTNAME_STRING(CUFAllocDescriptor),
                                  args)
      .getResult();
}
1334
/// Get the address of the type descriptor global variable that was created by
/// lowering for derived type \p recType.
template <typename ModOpTy>
static mlir::Value
getTypeDescriptor(ModOpTy mod, mlir::ConversionPatternRewriter &rewriter,
                  mlir::Location loc, fir::RecordType recType,
                  const fir::FIRToLLVMPassOptions &options) {
  std::string name =
      options.typeDescriptorsRenamedForAssembly
          ? fir::NameUniquer::getTypeDescriptorAssemblyName(recType.getName())
          : fir::NameUniquer::getTypeDescriptorName(recType.getName());
  mlir::Type llvmPtrTy = ::getLlvmPtrType(mod.getContext());
  mlir::DataLayout dataLayout(mod);
  // Take the address of the descriptor global, inserting an address space
  // cast when the global and program address spaces differ.
  if (auto global = mod.template lookupSymbol<fir::GlobalOp>(name))
    return replaceWithAddrOfOrASCast(
        rewriter, loc, fir::factory::getGlobalAddressSpace(&dataLayout),
        fir::factory::getProgramAddressSpace(&dataLayout), global.getSymName(),
        llvmPtrTy);
  // The global may have already been translated to LLVM.
  if (auto global = mod.template lookupSymbol<mlir::LLVM::GlobalOp>(name))
    return replaceWithAddrOfOrASCast(
        rewriter, loc, global.getAddrSpace(),
        fir::factory::getProgramAddressSpace(&dataLayout), global.getSymName(),
        llvmPtrTy);
  // Type info derived types do not have type descriptors since they are the
  // types defining type descriptors.
  if (options.ignoreMissingTypeDescriptors ||
      fir::NameUniquer::belongsToModule(
          name, Fortran::semantics::typeInfoBuiltinModule))
    return rewriter.create<mlir::LLVM::ZeroOp>(loc, llvmPtrTy);

  if (!options.skipExternalRttiDefinition)
    fir::emitFatalError(loc,
                        "runtime derived type info descriptor was not "
                        "generated and skipExternalRttiDefinition and "
                        "ignoreMissingTypeDescriptors options are not set");

  // Rtti for a derived type defined in another compilation unit and for which
  // rtti was not defined in lowering because of the skipExternalRttiDefinition
  // option. Generate the object declaration now.
  auto insertPt = rewriter.saveInsertionPoint();
  rewriter.setInsertionPoint(mod.getBody(), mod.getBody()->end());
  mlir::LLVM::GlobalOp global = rewriter.create<mlir::LLVM::GlobalOp>(
      loc, llvmPtrTy, /*constant=*/true, mlir::LLVM::Linkage::External, name,
      mlir::Attribute());
  rewriter.restoreInsertionPoint(insertPt);
  return rewriter.create<mlir::LLVM::AddressOfOp>(loc, llvmPtrTy,
                                                  global.getSymName());
}
1384
1385/// Common base class for embox to descriptor conversion.
1386template <typename OP>
1387struct EmboxCommonConversion : public fir::FIROpConversion<OP> {
1388 using fir::FIROpConversion<OP>::FIROpConversion;
1389 using TypePair = typename fir::FIROpConversion<OP>::TypePair;
1390
1391 static int getCFIAttr(fir::BaseBoxType boxTy) {
1392 auto eleTy = boxTy.getEleTy();
1393 if (mlir::isa<fir::PointerType>(eleTy))
1394 return CFI_attribute_pointer;
1395 if (mlir::isa<fir::HeapType>(eleTy))
1396 return CFI_attribute_allocatable;
1397 return CFI_attribute_other;
1398 }
1399
1400 mlir::Value getCharacterByteSize(mlir::Location loc,
1401 mlir::ConversionPatternRewriter &rewriter,
1402 fir::CharacterType charTy,
1403 mlir::ValueRange lenParams) const {
1404 auto i64Ty = mlir::IntegerType::get(rewriter.getContext(), 64);
1405 mlir::Value size = genTypeStrideInBytes(
1406 loc, i64Ty, rewriter, this->convertType(charTy), this->getDataLayout());
1407 if (charTy.hasConstantLen())
1408 return size; // Length accounted for in the genTypeStrideInBytes GEP.
1409 // Otherwise, multiply the single character size by the length.
1410 assert(!lenParams.empty());
1411 auto len64 = fir::FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty,
1412 lenParams.back());
1413 return rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, size, len64);
1414 }
1415
  // Get the element size and CFI type code of the boxed value.
  std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
      mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
      mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
    const mlir::DataLayout &dataLayout = this->getDataLayout();
    auto i64Ty = mlir::IntegerType::get(rewriter.getContext(), 64);
    // Strip pointer/heap wrappers and array shapes: size and code describe
    // the ultimate element type.
    if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
      boxEleTy = eleTy;
    if (auto seqTy = mlir::dyn_cast<fir::SequenceType>(boxEleTy))
      return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
    if (mlir::isa<mlir::NoneType>(
            boxEleTy)) // unlimited polymorphic or assumed type
      return {rewriter.create<mlir::LLVM::ConstantOp>(loc, i64Ty, 0),
              this->genConstantOffset(loc, rewriter, CFI_type_other)};
    mlir::Value typeCodeVal = this->genConstantOffset(
        loc, rewriter,
        fir::getTypeCode(boxEleTy, this->lowerTy().getKindMap()));
    // Numeric and logical types: size is the element stride.
    if (fir::isa_integer(boxEleTy) ||
        mlir::dyn_cast<fir::LogicalType>(boxEleTy) || fir::isa_real(boxEleTy) ||
        fir::isa_complex(boxEleTy))
      return {genTypeStrideInBytes(loc, i64Ty, rewriter,
                                   this->convertType(boxEleTy), dataLayout),
              typeCodeVal};
    // Character types may need the runtime length to compute the byte size.
    if (auto charTy = mlir::dyn_cast<fir::CharacterType>(boxEleTy))
      return {getCharacterByteSize(loc, rewriter, charTy, lenParams),
              typeCodeVal};
    // Boxed references: size is the pointer stride.
    if (fir::isa_ref_type(boxEleTy)) {
      auto ptrTy = ::getLlvmPtrType(rewriter.getContext());
      return {genTypeStrideInBytes(loc, i64Ty, rewriter, ptrTy, dataLayout),
              typeCodeVal};
    }
    if (mlir::isa<fir::RecordType>(boxEleTy))
      return {genTypeStrideInBytes(loc, i64Ty, rewriter,
                                   this->convertType(boxEleTy), dataLayout),
              typeCodeVal};
    fir::emitFatalError(loc, "unhandled type in fir.box code generation");
  }
1453
  /// Basic pattern to write a field in the descriptor
  mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Location loc, mlir::Value dest,
                          llvm::ArrayRef<std::int64_t> fldIndexes,
                          mlir::Value value, bool bitcast = false) const {
    auto boxTy = dest.getType();
    auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
    // Unless a raw (bitcast) store was requested, coerce the value to the
    // integer type of the destination field.
    if (!bitcast)
      value = this->integerCast(loc, rewriter, fldTy, value);
    // bitcast are no-ops with LLVM opaque pointers.
    return rewriter.create<mlir::LLVM::InsertValueOp>(loc, dest, value,
                                                      fldIndexes);
  }
1467
  /// Store base address \p base into the descriptor's address field.
  inline mlir::Value
  insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, mlir::Value dest,
                    mlir::Value base) const {
    return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
                       /*bitCast=*/true);
  }
1475
  /// Store lower bound \p lb for dimension \p dim into the descriptor.
  inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc, mlir::Value dest,
                                      unsigned dim, mlir::Value lb) const {
    return insertField(rewriter, loc, dest,
                       {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
  }
1482
  /// Store extent \p extent for dimension \p dim into the descriptor.
  inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value extent) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
                       extent);
  }
1489
  /// Store stride \p stride for dimension \p dim into the descriptor.
  inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value stride) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
                       stride);
  }
1496
  /// Fill the non-shape related fields of a Fortran runtime descriptor
  /// (CFI_cdesc_t-like struct) of type \p boxTy and return the resulting
  /// llvm.struct SSA value. Sets element size, version, rank, CFI type code,
  /// attribute, the "extra" byte (allocator index + addendum flag), and, when
  /// the box has an addendum, the type descriptor pointer and length
  /// parameters. The base address and the dims triples are left to callers.
  /// \p mod is the enclosing mlir::ModuleOp or mlir::gpu::GPUModuleOp used to
  /// look up derived type descriptor globals.
  template <typename ModOpTy>
  mlir::Value populateDescriptor(mlir::Location loc, ModOpTy mod,
                                 fir::BaseBoxType boxTy, mlir::Type inputType,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 unsigned rank, mlir::Value eleSize,
                                 mlir::Value cfiTy, mlir::Value typeDesc,
                                 int allocatorIdx = kDefaultAllocator,
                                 mlir::Value extraField = {}) const {
    auto llvmBoxTy = this->lowerTy().convertBoxTypeAsStruct(boxTy, rank);
    bool isUnlimitedPolymorphic = fir::isUnlimitedPolymorphicType(boxTy);
    bool useInputType = fir::isPolymorphicType(boxTy) || isUnlimitedPolymorphic;
    // Build the descriptor by successively inserting fields into an undef
    // struct value.
    mlir::Value descriptor =
        rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
    descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
                             this->genI32Constant(loc, rewriter, CFI_VERSION));
    descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
                             this->genI32Constant(loc, rewriter, rank));
    descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kAttributePosInBox},
                    this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));

    const bool hasAddendum = fir::boxHasAddendum(boxTy);

    if (extraField) {
      // Make sure to set the addendum presence flag according to the
      // destination box.
      if (hasAddendum) {
        // Set the addendum bit: extra |= _CFI_ADDENDUM_FLAG.
        auto maskAttr = mlir::IntegerAttr::get(
            rewriter.getIntegerType(8, /*isSigned=*/false),
            llvm::APInt(8, (uint64_t)_CFI_ADDENDUM_FLAG, /*isSigned=*/false));
        mlir::LLVM::ConstantOp mask = rewriter.create<mlir::LLVM::ConstantOp>(
            loc, rewriter.getI8Type(), maskAttr);
        extraField = rewriter.create<mlir::LLVM::OrOp>(loc, extraField, mask);
      } else {
        // Clear the addendum bit: extra &= ~_CFI_ADDENDUM_FLAG.
        auto maskAttr = mlir::IntegerAttr::get(
            rewriter.getIntegerType(8, /*isSigned=*/false),
            llvm::APInt(8, (uint64_t)~_CFI_ADDENDUM_FLAG, /*isSigned=*/true));
        mlir::LLVM::ConstantOp mask = rewriter.create<mlir::LLVM::ConstantOp>(
            loc, rewriter.getI8Type(), maskAttr);
        extraField = rewriter.create<mlir::LLVM::AndOp>(loc, extraField, mask);
      }
      // Extra field value is provided so just use it.
      descriptor =
          insertField(rewriter, loc, descriptor, {kExtraPosInBox}, extraField);
    } else {
      // Compute the value of the extra field based on allocator_idx and
      // addendum present.
      unsigned extra = allocatorIdx << _CFI_ALLOCATOR_IDX_SHIFT;
      if (hasAddendum)
        extra |= _CFI_ADDENDUM_FLAG;
      descriptor = insertField(rewriter, loc, descriptor, {kExtraPosInBox},
                               this->genI32Constant(loc, rewriter, extra));
    }

    if (hasAddendum) {
      unsigned typeDescFieldId = getTypeDescFieldId(boxTy);
      if (!typeDesc) {
        if (useInputType) {
          // Polymorphic box: derive the type descriptor from the dynamic
          // input type when it is a derived type.
          mlir::Type innerType = fir::unwrapInnerType(inputType);
          if (innerType && mlir::isa<fir::RecordType>(innerType)) {
            auto recTy = mlir::dyn_cast<fir::RecordType>(innerType);
            typeDesc =
                getTypeDescriptor(mod, rewriter, loc, recTy, this->options);
          } else {
            // Unlimited polymorphic type descriptor with no record type. Set
            // type descriptor address to a clean state.
            typeDesc = rewriter.create<mlir::LLVM::ZeroOp>(
                loc, ::getLlvmPtrType(mod.getContext()));
          }
        } else {
          // Non-polymorphic derived type box: use the static box type.
          typeDesc = getTypeDescriptor(
              mod, rewriter, loc, fir::unwrapIfDerived(boxTy), this->options);
        }
      }
      if (typeDesc)
        descriptor =
            insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
                        /*bitCast=*/true);
      // Always initialize the length parameter field to zero to avoid issues
      // with uninitialized values in Fortran code trying to compare physical
      // representation of derived types with pointer/allocatable components.
      // This has been seen in hashing algorithms using TRANSFER.
      mlir::Value zero =
          genConstantIndex(loc, rewriter.getI64Type(), rewriter, 0);
      descriptor = insertField(rewriter, loc, descriptor,
                               {getLenParamFieldId(boxTy), 0}, zero);
    }
    return descriptor;
  }
1589
  // Template used for fir::EmboxOp and fir::cg::XEmboxOp
  //
  // Builds a new descriptor value with all the shape-independent fields
  // filled in (base address and dims triples are left to the caller).
  // Returns the fir box type of the result, the partially-built descriptor
  // value, and the element size in bytes.
  template <typename BOX>
  std::tuple<fir::BaseBoxType, mlir::Value, mlir::Value>
  consDescriptorPrefix(BOX box, mlir::Type inputType,
                       mlir::ConversionPatternRewriter &rewriter, unsigned rank,
                       [[maybe_unused]] mlir::ValueRange substrParams,
                       mlir::ValueRange lenParams, mlir::Value sourceBox = {},
                       mlir::Type sourceBoxType = {}) const {
    auto loc = box.getLoc();
    auto boxTy = mlir::dyn_cast<fir::BaseBoxType>(box.getType());
    bool useInputType = fir::isPolymorphicType(boxTy) &&
                        !fir::isUnlimitedPolymorphicType(inputType);
    llvm::SmallVector<mlir::Value> typeparams = lenParams;
    // XEmboxOp only: a substring selection on a dynamically sized element
    // contributes its length as an extra type parameter.
    if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
      if (!box.getSubstr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
        typeparams.push_back(substrParams[1]);
    }

    // CUDA Fortran allocator index carried by the embox op, if any.
    int allocatorIdx = 0;
    if constexpr (std::is_same_v<BOX, fir::EmboxOp> ||
                  std::is_same_v<BOX, fir::cg::XEmboxOp>) {
      if (box.getAllocatorIdx())
        allocatorIdx = *box.getAllocatorIdx();
    }

    // Write each of the fields with the appropriate values.
    // When emboxing an element to a polymorphic descriptor, use the
    // input type since the destination descriptor type has not the exact
    // information.
    auto [eleSize, cfiTy] = getSizeAndTypeCode(
        loc, rewriter, useInputType ? inputType : boxTy.getEleTy(), typeparams);

    mlir::Value typeDesc;
    mlir::Value extraField;
    // When emboxing to a polymorphic box, get the type descriptor, type code
    // and element size from the source box if any.
    if (fir::isPolymorphicType(boxTy) && sourceBox) {
      TypePair sourceBoxTyPair = this->getBoxTypePair(sourceBoxType);
      typeDesc =
          this->loadTypeDescAddress(loc, sourceBoxTyPair, sourceBox, rewriter);
      mlir::Type idxTy = this->lowerTy().indexType();
      eleSize = this->getElementSizeFromBox(loc, idxTy, sourceBoxTyPair,
                                            sourceBox, rewriter);
      cfiTy = this->getValueFromBox(loc, sourceBoxTyPair, sourceBox,
                                    cfiTy.getType(), rewriter, kTypePosInBox);
      extraField =
          this->getExtraFromBox(loc, sourceBoxTyPair, sourceBox, rewriter);
    }

    // Dispatch on the closest enclosing module kind (GPU module vs regular
    // module) so type descriptor globals are materialized in the right scope.
    mlir::Value descriptor;
    if (auto gpuMod = box->template getParentOfType<mlir::gpu::GPUModuleOp>())
      descriptor = populateDescriptor(loc, gpuMod, boxTy, inputType, rewriter,
                                      rank, eleSize, cfiTy, typeDesc,
                                      allocatorIdx, extraField);
    else if (auto mod = box->template getParentOfType<mlir::ModuleOp>())
      descriptor = populateDescriptor(loc, mod, boxTy, inputType, rewriter,
                                      rank, eleSize, cfiTy, typeDesc,
                                      allocatorIdx, extraField);

    return {boxTy, descriptor, eleSize};
  }
1651
  /// Overload for fir::cg::XReboxOp: build the shape-independent part of the
  /// result descriptor from the input (lowered) box. Returns the fir box type
  /// of the result, the partially-built descriptor value, and the element
  /// size in bytes.
  std::tuple<fir::BaseBoxType, mlir::Value, mlir::Value>
  consDescriptorPrefix(fir::cg::XReboxOp box, mlir::Value loweredBox,
                       mlir::ConversionPatternRewriter &rewriter, unsigned rank,
                       mlir::ValueRange substrParams,
                       mlir::ValueRange lenParams,
                       mlir::Value typeDesc = {}) const {
    auto loc = box.getLoc();
    auto boxTy = mlir::dyn_cast<fir::BaseBoxType>(box.getType());
    auto inputBoxTy = mlir::dyn_cast<fir::BaseBoxType>(box.getBox().getType());
    auto inputBoxTyPair = this->getBoxTypePair(inputBoxTy);
    llvm::SmallVector<mlir::Value> typeparams = lenParams;
    // A substring selection on a dynamically sized element contributes its
    // length as an extra type parameter.
    if (!box.getSubstr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
      typeparams.push_back(substrParams[1]);

    auto [eleSize, cfiTy] =
        getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);

    // Reboxing to a polymorphic entity. eleSize and type code need to
    // be retrieved from the initial box and propagated to the new box.
    // If the initial box has an addendum, the type desc must be propagated as
    // well.
    if (fir::isPolymorphicType(boxTy)) {
      mlir::Type idxTy = this->lowerTy().indexType();
      eleSize = this->getElementSizeFromBox(loc, idxTy, inputBoxTyPair,
                                            loweredBox, rewriter);
      cfiTy = this->getValueFromBox(loc, inputBoxTyPair, loweredBox,
                                    cfiTy.getType(), rewriter, kTypePosInBox);
      // TODO: For initial box that are unlimited polymorphic entities, this
      // code must be made conditional because unlimited polymorphic entities
      // with intrinsic type spec does not have addendum.
      if (fir::boxHasAddendum(inputBoxTy))
        typeDesc = this->loadTypeDescAddress(loc, inputBoxTyPair, loweredBox,
                                             rewriter);
    }

    // Propagate the extra byte (allocator index and flags) from the input box.
    mlir::Value extraField =
        this->getExtraFromBox(loc, inputBoxTyPair, loweredBox, rewriter);

    // Dispatch on the closest enclosing module kind so type descriptor
    // globals are materialized in the right scope.
    mlir::Value descriptor;
    if (auto gpuMod = box->template getParentOfType<mlir::gpu::GPUModuleOp>())
      descriptor =
          populateDescriptor(loc, gpuMod, boxTy, box.getBox().getType(),
                             rewriter, rank, eleSize, cfiTy, typeDesc,
                             /*allocatorIdx=*/kDefaultAllocator, extraField);
    else if (auto mod = box->template getParentOfType<mlir::ModuleOp>())
      descriptor =
          populateDescriptor(loc, mod, boxTy, box.getBox().getType(), rewriter,
                             rank, eleSize, cfiTy, typeDesc,
                             /*allocatorIdx=*/kDefaultAllocator, extraField);

    return {boxTy, descriptor, eleSize};
  }
1704
1705 // Compute the base address of a fir.box given the indices from the slice.
1706 // The indices from the "outer" dimensions (every dimension after the first
1707 // one (included) that is not a compile time constant) must have been
1708 // multiplied with the related extents and added together into \p outerOffset.
1709 mlir::Value
1710 genBoxOffsetGep(mlir::ConversionPatternRewriter &rewriter, mlir::Location loc,
1711 mlir::Value base, mlir::Type llvmBaseObjectType,
1712 mlir::Value outerOffset, mlir::ValueRange cstInteriorIndices,
1713 mlir::ValueRange componentIndices,
1714 std::optional<mlir::Value> substringOffset) const {
1715 llvm::SmallVector<mlir::LLVM::GEPArg> gepArgs{outerOffset};
1716 mlir::Type resultTy = llvmBaseObjectType;
1717 // Fortran is column major, llvm GEP is row major: reverse the indices here.
1718 for (mlir::Value interiorIndex : llvm::reverse(cstInteriorIndices)) {
1719 auto arrayTy = mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(resultTy);
1720 if (!arrayTy)
1721 fir::emitFatalError(
1722 loc,
1723 "corrupted GEP generated being generated in fir.embox/fir.rebox");
1724 resultTy = arrayTy.getElementType();
1725 gepArgs.push_back(interiorIndex);
1726 }
1727 llvm::SmallVector<mlir::Value> gepIndices =
1728 convertSubcomponentIndices(loc, resultTy, componentIndices, &resultTy);
1729 gepArgs.append(gepIndices.begin(), gepIndices.end());
1730 if (substringOffset) {
1731 if (auto arrayTy = mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(resultTy)) {
1732 gepArgs.push_back(*substringOffset);
1733 resultTy = arrayTy.getElementType();
1734 } else {
1735 // If the CHARACTER length is dynamic, the whole base type should have
1736 // degenerated to an llvm.ptr<i[width]>, and there should not be any
1737 // cstInteriorIndices/componentIndices. The substring offset can be
1738 // added to the outterOffset since it applies on the same LLVM type.
1739 if (gepArgs.size() != 1)
1740 fir::emitFatalError(loc,
1741 "corrupted substring GEP in fir.embox/fir.rebox");
1742 mlir::Type outterOffsetTy =
1743 llvm::cast<mlir::Value>(gepArgs[0]).getType();
1744 mlir::Value cast =
1745 this->integerCast(loc, rewriter, outterOffsetTy, *substringOffset);
1746
1747 gepArgs[0] = rewriter.create<mlir::LLVM::AddOp>(
1748 loc, outterOffsetTy, llvm::cast<mlir::Value>(gepArgs[0]), cast);
1749 }
1750 }
1751 mlir::Type llvmPtrTy = ::getLlvmPtrType(resultTy.getContext());
1752 return rewriter.create<mlir::LLVM::GEPOp>(
1753 loc, llvmPtrTy, llvmBaseObjectType, base, gepArgs);
1754 }
1755
1756 template <typename BOX>
1757 void
1758 getSubcomponentIndices(BOX xbox, mlir::Value memref,
1759 mlir::ValueRange operands,
1760 mlir::SmallVectorImpl<mlir::Value> &indices) const {
1761 // For each field in the path add the offset to base via the args list.
1762 // In the most general case, some offsets must be computed since
1763 // they are not be known until runtime.
1764 if (fir::hasDynamicSize(fir::unwrapSequenceType(
1765 fir::unwrapPassByRefType(memref.getType()))))
1766 TODO(xbox.getLoc(),
1767 "fir.embox codegen dynamic size component in derived type");
1768 indices.append(operands.begin() + xbox.getSubcomponentOperandIndex(),
1769 operands.begin() + xbox.getSubcomponentOperandIndex() +
1770 xbox.getSubcomponent().size());
1771 }
1772
1773 static bool isInGlobalOp(mlir::ConversionPatternRewriter &rewriter) {
1774 auto *thisBlock = rewriter.getInsertionBlock();
1775 return thisBlock &&
1776 mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp());
1777 }
1778
1779 /// If the embox is not in a globalOp body, allocate storage for the box;
1780 /// store the value inside and return the generated alloca. Return the input
1781 /// value otherwise.
1782 mlir::Value
1783 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
1784 mlir::Location loc, mlir::Type boxTy,
1785 mlir::Value boxValue,
1786 bool needDeviceAllocation = false) const {
1787 if (isInGlobalOp(rewriter))
1788 return boxValue;
1789 mlir::Type llvmBoxTy = boxValue.getType();
1790 mlir::Value storage;
1791 if (needDeviceAllocation) {
1792 auto mod = boxValue.getDefiningOp()->getParentOfType<mlir::ModuleOp>();
1793 auto baseBoxTy = mlir::dyn_cast<fir::BaseBoxType>(boxTy);
1794 storage =
1795 genCUFAllocDescriptor(loc, rewriter, mod, baseBoxTy, this->lowerTy());
1796 } else {
1797 storage = this->genAllocaAndAddrCastWithType(loc, llvmBoxTy, defaultAlign,
1798 rewriter);
1799 }
1800 auto storeOp = rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, storage);
1801 this->attachTBAATag(storeOp, boxTy, boxTy, nullptr);
1802 return storage;
1803 }
1804
1805 /// Compute the extent of a triplet slice (lb:ub:step).
1806 mlir::Value computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
1807 mlir::Location loc, mlir::Value lb,
1808 mlir::Value ub, mlir::Value step,
1809 mlir::Value zero, mlir::Type type) const {
1810 lb = this->integerCast(loc, rewriter, type, lb);
1811 ub = this->integerCast(loc, rewriter, type, ub);
1812 step = this->integerCast(loc, rewriter, type, step);
1813 zero = this->integerCast(loc, rewriter, type, zero);
1814 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
1815 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
1816 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
1817 // If the resulting extent is negative (`ub-lb` and `step` have different
1818 // signs), zero must be returned instead.
1819 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
1820 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
1821 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
1822 }
1823};
1824
/// Create a generic box on a memory reference. This conversions lowers the
/// abstract box to the appropriate, initialized descriptor.
struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
  using EmboxCommonConversion::EmboxCommonConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    // Pick up the optional source box (polymorphic embox) and its fir type.
    mlir::Value sourceBox;
    mlir::Type sourceBoxType;
    if (embox.getSourceBox()) {
      sourceBox = operands[embox.getSourceBoxOperandIndex()];
      sourceBoxType = embox.getSourceBox().getType();
    }
    // fir.embox is for scalars only; arrays go through fir.cg.xembox.
    assert(!embox.getShape() && "There should be no dims on this embox op");
    // Build the descriptor with all shape-independent fields set (rank 0,
    // no substring), then plug in the base address.
    auto [boxTy, dest, eleSize] = consDescriptorPrefix(
        embox, fir::unwrapRefType(embox.getMemref().getType()), rewriter,
        /*rank=*/0, /*substrParams=*/mlir::ValueRange{},
        adaptor.getTypeparams(), sourceBox, sourceBoxType);
    dest = insertBaseAddress(rewriter, embox.getLoc(), dest, operands[0]);
    if (fir::isDerivedTypeWithLenParams(boxTy)) {
      TODO(embox.getLoc(),
           "fir.embox codegen of derived with length parameters");
      return mlir::failure();
    }
    // Outside of a global initializer the box is stored to stack memory.
    auto result =
        placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), boxTy, dest);
    rewriter.replaceOp(embox, result);
    return mlir::success();
  }
};
1857
1858static bool isDeviceAllocation(mlir::Value val, mlir::Value adaptorVal) {
1859 if (auto loadOp = mlir::dyn_cast_or_null<fir::LoadOp>(val.getDefiningOp()))
1860 return isDeviceAllocation(loadOp.getMemref(), {});
1861 if (auto boxAddrOp =
1862 mlir::dyn_cast_or_null<fir::BoxAddrOp>(val.getDefiningOp()))
1863 return isDeviceAllocation(boxAddrOp.getVal(), {});
1864 if (auto convertOp =
1865 mlir::dyn_cast_or_null<fir::ConvertOp>(val.getDefiningOp()))
1866 return isDeviceAllocation(convertOp.getValue(), {});
1867 if (!val.getDefiningOp() && adaptorVal) {
1868 if (auto blockArg = llvm::cast<mlir::BlockArgument>(adaptorVal)) {
1869 if (blockArg.getOwner() && blockArg.getOwner()->getParentOp() &&
1870 blockArg.getOwner()->isEntryBlock()) {
1871 if (auto func = mlir::dyn_cast_or_null<mlir::FunctionOpInterface>(
1872 *blockArg.getOwner()->getParentOp())) {
1873 auto argAttrs = func.getArgAttrs(blockArg.getArgNumber());
1874 for (auto attr : argAttrs) {
1875 if (attr.getName().getValue().ends_with(cuf::getDataAttrName())) {
1876 auto dataAttr =
1877 mlir::dyn_cast<cuf::DataAttributeAttr>(attr.getValue());
1878 if (dataAttr.getValue() != cuf::DataAttribute::Pinned &&
1879 dataAttr.getValue() != cuf::DataAttribute::Unified)
1880 return true;
1881 }
1882 }
1883 }
1884 }
1885 }
1886 }
1887 if (auto callOp = mlir::dyn_cast_or_null<fir::CallOp>(val.getDefiningOp()))
1888 if (callOp.getCallee() &&
1889 (callOp.getCallee().value().getRootReference().getValue().starts_with(
1890 RTNAME_STRING(CUFMemAlloc)) ||
1891 callOp.getCallee().value().getRootReference().getValue().starts_with(
1892 RTNAME_STRING(CUFAllocDescriptor)) ||
1893 callOp.getCallee().value().getRootReference().getValue() ==
1894 "__tgt_acc_get_deviceptr"))
1895 return true;
1896 return false;
1897}
1898
/// Create a generic box on a memory reference.
struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
  using EmboxCommonConversion::EmboxCommonConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    // Pick up the optional source box (polymorphic embox) and its fir type.
    mlir::Value sourceBox;
    mlir::Type sourceBoxType;
    if (xbox.getSourceBox()) {
      sourceBox = operands[xbox.getSourceBoxOperandIndex()];
      sourceBoxType = xbox.getSourceBox().getType();
    }
    // Build the descriptor with all shape-independent fields set.
    auto [boxTy, dest, resultEleSize] = consDescriptorPrefix(
        xbox, fir::unwrapRefType(xbox.getMemref().getType()), rewriter,
        xbox.getOutRank(), adaptor.getSubstr(), adaptor.getLenParams(),
        sourceBox, sourceBoxType);
    // Generate the triples in the dims field of the descriptor
    auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64);
    assert(!xbox.getShape().empty() && "must have a shape");
    unsigned shapeOffset = xbox.getShapeOperandIndex();
    bool hasShift = !xbox.getShift().empty();
    unsigned shiftOffset = xbox.getShiftOperandIndex();
    bool hasSlice = !xbox.getSlice().empty();
    unsigned sliceOffset = xbox.getSliceOperandIndex();
    mlir::Location loc = xbox.getLoc();
    mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0);
    mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1);
    // Running element-count scale used to fold slice offsets into the base
    // address computation.
    mlir::Value prevPtrOff = one;
    mlir::Type eleTy = boxTy.getEleTy();
    const unsigned rank = xbox.getRank();
    llvm::SmallVector<mlir::Value> cstInteriorIndices;
    unsigned constRows = 0;
    mlir::Value ptrOffset = zero;
    mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.getMemref().getType());
    assert(mlir::isa<fir::SequenceType>(memEleTy));
    auto seqTy = mlir::cast<fir::SequenceType>(memEleTy);
    mlir::Type seqEleTy = seqTy.getEleTy();
    // Adjust the element scaling factor if the element is a dependent type.
    if (fir::hasDynamicSize(seqEleTy)) {
      if (auto charTy = mlir::dyn_cast<fir::CharacterType>(seqEleTy)) {
        // The GEP pointer type decays to llvm.ptr<i[width]>.
        // The scaling factor is the runtime value of the length.
        assert(!adaptor.getLenParams().empty());
        prevPtrOff = FIROpConversion::integerCast(
            loc, rewriter, i64Ty, adaptor.getLenParams().back());
      } else if (mlir::isa<fir::RecordType>(seqEleTy)) {
        // Parameterized derived type: the element size is runtime dependent.
        TODO(loc, "generate call to calculate size of PDT");
      } else {
        fir::emitFatalError(loc, "unexpected dynamic type");
      }
    } else {
      constRows = seqTy.getConstantRows();
    }

    const auto hasSubcomp = !xbox.getSubcomponent().empty();
    const bool hasSubstr = !xbox.getSubstr().empty();
    // Initial element stride that will be use to compute the step in
    // each dimension. Initially, this is the size of the input element.
    // Note that when there are no components/substring, the resultEleSize
    // that was previously computed matches the input element size.
    mlir::Value prevDimByteStride = resultEleSize;
    if (hasSubcomp) {
      // We have a subcomponent. The step value needs to be the number of
      // bytes per element (which is a derived type).
      prevDimByteStride = genTypeStrideInBytes(
          loc, i64Ty, rewriter, convertType(seqEleTy), getDataLayout());
    } else if (hasSubstr) {
      // We have a substring. The step value needs to be the number of bytes
      // per CHARACTER element.
      auto charTy = mlir::cast<fir::CharacterType>(seqEleTy);
      if (fir::hasDynamicSize(charTy)) {
        prevDimByteStride =
            getCharacterByteSize(loc, rewriter, charTy, adaptor.getLenParams());
      } else {
        prevDimByteStride = genConstantIndex(
            loc, i64Ty, rewriter,
            charTy.getLen() * lowerTy().characterBitsize(charTy) / 8);
      }
    }

    // Process the array subspace arguments (shape, shift, etc.), if any,
    // translating everything to values in the descriptor wherever the entity
    // has a dynamic array dimension.
    for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
      mlir::Value extent =
          integerCast(loc, rewriter, i64Ty, operands[shapeOffset]);
      mlir::Value outerExtent = extent;
      bool skipNext = false;
      if (hasSlice) {
        // Accumulate the slice lower bound offset into either the constant
        // interior indices (constant-extent leading dims) or the running
        // pointer offset.
        mlir::Value off =
            integerCast(loc, rewriter, i64Ty, operands[sliceOffset]);
        mlir::Value adj = one;
        if (hasShift)
          adj = integerCast(loc, rewriter, i64Ty, operands[shiftOffset]);
        auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
        if (constRows > 0) {
          cstInteriorIndices.push_back(ao);
        } else {
          auto dimOff =
              rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
          ptrOffset =
              rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
        }
        if (mlir::isa_and_nonnull<fir::UndefOp>(
                xbox.getSlice()[3 * di + 1].getDefiningOp())) {
          // This dimension contains a scalar expression in the array slice op.
          // The dimension is loop invariant, will be dropped, and will not
          // appear in the descriptor.
          skipNext = true;
        }
      }
      if (!skipNext) {
        // store extent
        if (hasSlice)
          extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
                                        operands[sliceOffset + 1],
                                        operands[sliceOffset + 2], zero, i64Ty);
        // Lower bound is normalized to 0 for BIND(C) interoperability.
        mlir::Value lb = zero;
        const bool isaPointerOrAllocatable =
            mlir::isa<fir::PointerType, fir::HeapType>(eleTy);
        // Lower bound is defaults to 1 for POINTER, ALLOCATABLE, and
        // denormalized descriptors.
        if (isaPointerOrAllocatable || !normalizedLowerBound(xbox))
          lb = one;
        // If there is a shifted origin, and no fir.slice, and this is not
        // a normalized descriptor then use the value from the shift op as
        // the lower bound.
        if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) &&
            (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) {
          lb = integerCast(loc, rewriter, i64Ty, operands[shiftOffset]);
          // Zero-extent dimensions keep a lower bound of one.
          auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
              loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
          lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one,
                                                     lb);
        }
        dest = insertLowerBound(rewriter, loc, dest, descIdx, lb);

        dest = insertExtent(rewriter, loc, dest, descIdx, extent);

        // store step (scaled by shaped extent)
        mlir::Value step = prevDimByteStride;
        if (hasSlice) {
          mlir::Value sliceStep =
              integerCast(loc, rewriter, i64Ty, operands[sliceOffset + 2]);
          step =
              rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, sliceStep);
        }
        dest = insertStride(rewriter, loc, dest, descIdx, step);
        ++descIdx;
      }

      // compute the stride and offset for the next natural dimension
      prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>(
          loc, i64Ty, prevDimByteStride, outerExtent);
      if (constRows == 0)
        prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
                                                        outerExtent);
      else
        --constRows;

      // increment iterators
      ++shapeOffset;
      if (hasShift)
        ++shiftOffset;
      if (hasSlice)
        sliceOffset += 3;
    }
    // Shift the base address by the accumulated slice/component/substring
    // offsets, when any are present.
    mlir::Value base = adaptor.getMemref();
    if (hasSlice || hasSubcomp || hasSubstr) {
      // Shift the base address.
      llvm::SmallVector<mlir::Value> fieldIndices;
      std::optional<mlir::Value> substringOffset;
      if (hasSubcomp)
        getSubcomponentIndices(xbox, xbox.getMemref(), operands, fieldIndices);
      if (hasSubstr)
        substringOffset = operands[xbox.getSubstrOperandIndex()];
      mlir::Type llvmBaseType =
          convertType(fir::unwrapRefType(xbox.getMemref().getType()));
      base = genBoxOffsetGep(rewriter, loc, base, llvmBaseType, ptrOffset,
                             cstInteriorIndices, fieldIndices, substringOffset);
    }
    dest = insertBaseAddress(rewriter, loc, dest, base);
    if (fir::isDerivedTypeWithLenParams(boxTy))
      TODO(loc, "fir.embox codegen of derived with length parameters");
    // Outside of a global initializer the descriptor is materialized in
    // memory (device memory when the underlying storage is device-allocated).
    mlir::Value result = placeInMemoryIfNotGlobalInit(
        rewriter, loc, boxTy, dest,
        isDeviceAllocation(xbox.getMemref(), adaptor.getMemref()));
    rewriter.replaceOp(xbox, result);
    return mlir::success();
  }

  /// Return true if `xbox` has a normalized lower bounds attribute. A box value
  /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a
  /// zero origin lower bound for interoperability with BIND(C).
  inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) {
    return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName());
  }
};
2101
2102/// Create a new box given a box reference.
2103struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
2104 using EmboxCommonConversion::EmboxCommonConversion;
2105
  /// Lower fir.cg.xrebox: build a new descriptor from an existing one,
  /// possibly applying a slice/subcomponent/substring or a reshape, and
  /// propagating dynamic type information for polymorphic entities.
  llvm::LogicalResult
  matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = rebox.getLoc();
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Value loweredBox = adaptor.getOperands()[0];
    mlir::ValueRange operands = adaptor.getOperands();

    // Inside a fir.global, the input box was produced as an llvm.struct<>
    // because objects cannot be handled in memory inside a fir.global body that
    // must be constant foldable. However, the type translation are not
    // contextual, so the fir.box<T> type of the operation that produced the
    // fir.box was translated to an llvm.ptr<llvm.struct<>> and the MLIR pass
    // manager inserted a builtin.unrealized_conversion_cast that was inserted
    // and needs to be removed here.
    if (isInGlobalOp(rewriter))
      if (auto unrealizedCast =
              loweredBox.getDefiningOp<mlir::UnrealizedConversionCastOp>())
        loweredBox = unrealizedCast.getInputs()[0];

    TypePair inputBoxTyPair = getBoxTypePair(rebox.getBox().getType());

    // Create new descriptor and fill its non-shape related data.
    llvm::SmallVector<mlir::Value, 2> lenParams;
    mlir::Type inputEleTy = getInputEleTy(rebox);
    if (auto charTy = mlir::dyn_cast<fir::CharacterType>(inputEleTy)) {
      if (charTy.hasConstantLen()) {
        mlir::Value len =
            genConstantIndex(loc, idxTy, rewriter, charTy.getLen());
        lenParams.emplace_back(len);
      } else {
        // Dynamic CHARACTER length: read the element size from the input box
        // and divide by the character width for non-default kinds.
        mlir::Value len = getElementSizeFromBox(loc, idxTy, inputBoxTyPair,
                                                loweredBox, rewriter);
        if (charTy.getFKind() != 1) {
          assert(!isInGlobalOp(rewriter) &&
                 "character target in global op must have constant length");
          mlir::Value width =
              genConstantIndex(loc, idxTy, rewriter, charTy.getFKind());
          len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width);
        }
        lenParams.emplace_back(len);
      }
    } else if (auto recTy = mlir::dyn_cast<fir::RecordType>(inputEleTy)) {
      if (recTy.getNumLenParams() != 0)
        TODO(loc, "reboxing descriptor of derived type with length parameters");
    }

    // Rebox on polymorphic entities needs to carry over the dynamic type.
    mlir::Value typeDescAddr;
    if (mlir::isa<fir::ClassType>(inputBoxTyPair.fir) &&
        mlir::isa<fir::ClassType>(rebox.getType()))
      typeDescAddr =
          loadTypeDescAddress(loc, inputBoxTyPair, loweredBox, rewriter);

    // Build the shape-independent part of the result descriptor.
    auto [boxTy, dest, eleSize] =
        consDescriptorPrefix(rebox, loweredBox, rewriter, rebox.getOutRank(),
                             adaptor.getSubstr(), lenParams, typeDescAddr);

    // Read input extents, strides, and base address
    llvm::SmallVector<mlir::Value> inputExtents;
    llvm::SmallVector<mlir::Value> inputStrides;
    const unsigned inputRank = rebox.getRank();
    for (unsigned dim = 0; dim < inputRank; ++dim) {
      llvm::SmallVector<mlir::Value, 3> dimInfo =
          getDimsFromBox(loc, {idxTy, idxTy, idxTy}, inputBoxTyPair, loweredBox,
                         dim, rewriter);
      inputExtents.emplace_back(dimInfo[1]);
      inputStrides.emplace_back(dimInfo[2]);
    }

    mlir::Value baseAddr =
        getBaseAddrFromBox(loc, inputBoxTyPair, loweredBox, rewriter);

    // Dispatch to the slicing path (array section/subcomponent) or the
    // reshaping path (fir.shape/fir.shift only).
    if (!rebox.getSlice().empty() || !rebox.getSubcomponent().empty())
      return sliceBox(rebox, adaptor, boxTy, dest, baseAddr, inputExtents,
                      inputStrides, operands, rewriter);
    return reshapeBox(rebox, adaptor, boxTy, dest, baseAddr, inputExtents,
                      inputStrides, operands, rewriter);
  }
2185
2186private:
  /// Write resulting shape and base address in descriptor, and replace rebox
  /// op.
  llvm::LogicalResult
  finalizeRebox(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
                mlir::Type destBoxTy, mlir::Value dest, mlir::Value base,
                mlir::ValueRange lbounds, mlir::ValueRange extents,
                mlir::ValueRange strides,
                mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Location loc = rebox.getLoc();
    mlir::Value zero =
        genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
    mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1);
    // Fill the dims triples (lower bound, extent, stride) for each result
    // dimension. Lower bound defaults to one when no bounds are supplied.
    for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) {
      mlir::Value extent = std::get<0>(iter.value());
      unsigned dim = iter.index();
      mlir::Value lb = one;
      if (!lbounds.empty()) {
        lb = integerCast(loc, rewriter, lowerTy().indexType(), lbounds[dim]);
        // Zero-extent dimensions keep a lower bound of one.
        auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
        lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb);
      };
      dest = insertLowerBound(rewriter, loc, dest, dim, lb);
      dest = insertExtent(rewriter, loc, dest, dim, extent);
      dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value()));
    }
    dest = insertBaseAddress(rewriter, loc, dest, base);
    // Materialize the descriptor in memory (device memory when the input box
    // storage is device-allocated), then replace the rebox op.
    mlir::Value result = placeInMemoryIfNotGlobalInit(
        rewriter, rebox.getLoc(), destBoxTy, dest,
        isDeviceAllocation(rebox.getBox(), adaptor.getBox()));
    rewriter.replaceOp(rebox, result);
    return mlir::success();
  }
2220
2221 // Apply slice given the base address, extents and strides of the input box.
  llvm::LogicalResult
  sliceBox(fir::cg::XReboxOp rebox, OpAdaptor adaptor, mlir::Type destBoxTy,
           mlir::Value dest, mlir::Value base, mlir::ValueRange inputExtents,
           mlir::ValueRange inputStrides, mlir::ValueRange operands,
           mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Location loc = rebox.getLoc();
    mlir::Type byteTy = ::getI8Type(rebox.getContext());
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
    // Apply subcomponent and substring shift on base address.
    if (!rebox.getSubcomponent().empty() || !rebox.getSubstr().empty()) {
      // Cast to inputEleTy* so that a GEP can be used.
      mlir::Type inputEleTy = getInputEleTy(rebox);
      mlir::Type llvmBaseObjectType = convertType(inputEleTy);
      llvm::SmallVector<mlir::Value> fieldIndices;
      std::optional<mlir::Value> substringOffset;
      if (!rebox.getSubcomponent().empty())
        getSubcomponentIndices(rebox, rebox.getBox(), operands, fieldIndices);
      if (!rebox.getSubstr().empty())
        substringOffset = operands[rebox.getSubstrOperandIndex()];
      base = genBoxOffsetGep(rewriter, loc, base, llvmBaseObjectType, zero,
                             /*cstInteriorIndices=*/{}, fieldIndices,
                             substringOffset);
    }

    if (rebox.getSlice().empty())
      // The array section is of the form array[%component][substring], keep
      // the input array extents and strides.
      return finalizeRebox(rebox, adaptor, destBoxTy, dest, base,
                           /*lbounds*/ {}, inputExtents, inputStrides,
                           rewriter);

    // The slice is of the form array(i:j:k)[%component]. Compute new extents
    // and strides.
    llvm::SmallVector<mlir::Value> slicedExtents;
    llvm::SmallVector<mlir::Value> slicedStrides;
    mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
    const bool sliceHasOrigins = !rebox.getShift().empty();
    // Running offsets into the flattened operand list: one triplet (lb, ub,
    // step) per dimension for the slice, one value per dimension for the
    // origin shifts.
    unsigned sliceOps = rebox.getSliceOperandIndex();
    unsigned shiftOps = rebox.getShiftOperandIndex();
    auto strideOps = inputStrides.begin();
    const unsigned inputRank = inputStrides.size();
    for (unsigned i = 0; i < inputRank;
         ++i, ++strideOps, ++shiftOps, sliceOps += 3) {
      mlir::Value sliceLb =
          integerCast(loc, rewriter, idxTy, operands[sliceOps]);
      mlir::Value inputStride = *strideOps; // already idxTy
      // Apply origin shift: base += (lb-shift)*input_stride
      mlir::Value sliceOrigin =
          sliceHasOrigins
              ? integerCast(loc, rewriter, idxTy, operands[shiftOps])
              : one;
      mlir::Value diff =
          rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin);
      mlir::Value offset =
          rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride);
      // Strides from the fir.box are in bytes.
      base = genGEP(loc, byteTy, rewriter, base, offset);
      // Apply upper bound and step if this is a triplet. Otherwise, the
      // dimension is dropped and no extents/strides are computed.
      mlir::Value upper = operands[sliceOps + 1];
      const bool isTripletSlice =
          !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp());
      if (isTripletSlice) {
        mlir::Value step =
            integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]);
        // extent = (ub - lb + step) / step (clamped to zero for empty slices
        // inside computeTripletExtent).
        mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper);
        mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb,
                                                  sliceUb, step, zero, idxTy);
        slicedExtents.emplace_back(extent);
        // stride = step*input_stride
        mlir::Value stride =
            rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride);
        slicedStrides.emplace_back(stride);
      }
    }
    return finalizeRebox(rebox, adaptor, destBoxTy, dest, base,
                         /*lbounds*/ {}, slicedExtents, slicedStrides,
                         rewriter);
  }
2303
2304 /// Apply a new shape to the data described by a box given the base address,
2305 /// extents and strides of the box.
2306 llvm::LogicalResult
2307 reshapeBox(fir::cg::XReboxOp rebox, OpAdaptor adaptor, mlir::Type destBoxTy,
2308 mlir::Value dest, mlir::Value base, mlir::ValueRange inputExtents,
2309 mlir::ValueRange inputStrides, mlir::ValueRange operands,
2310 mlir::ConversionPatternRewriter &rewriter) const {
2311 mlir::ValueRange reboxShifts{
2312 operands.begin() + rebox.getShiftOperandIndex(),
2313 operands.begin() + rebox.getShiftOperandIndex() +
2314 rebox.getShift().size()};
2315 if (rebox.getShape().empty()) {
2316 // Only setting new lower bounds.
2317 return finalizeRebox(rebox, adaptor, destBoxTy, dest, base, reboxShifts,
2318 inputExtents, inputStrides, rewriter);
2319 }
2320
2321 mlir::Location loc = rebox.getLoc();
2322
2323 llvm::SmallVector<mlir::Value> newStrides;
2324 llvm::SmallVector<mlir::Value> newExtents;
2325 mlir::Type idxTy = lowerTy().indexType();
2326 // First stride from input box is kept. The rest is assumed contiguous
2327 // (it is not possible to reshape otherwise). If the input is scalar,
2328 // which may be OK if all new extents are ones, the stride does not
2329 // matter, use one.
2330 mlir::Value stride = inputStrides.empty()
2331 ? genConstantIndex(loc, idxTy, rewriter, 1)
2332 : inputStrides[0];
2333 for (unsigned i = 0; i < rebox.getShape().size(); ++i) {
2334 mlir::Value rawExtent = operands[rebox.getShapeOperandIndex() + i];
2335 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent);
2336 newExtents.emplace_back(extent);
2337 newStrides.emplace_back(stride);
2338 // nextStride = extent * stride;
2339 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride);
2340 }
2341 return finalizeRebox(rebox, adaptor, destBoxTy, dest, base, reboxShifts,
2342 newExtents, newStrides, rewriter);
2343 }
2344
2345 /// Return scalar element type of the input box.
2346 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) {
2347 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.getBox().getType());
2348 if (auto seqTy = mlir::dyn_cast<fir::SequenceType>(ty))
2349 return seqTy.getEleTy();
2350 return ty;
2351 }
2352};
2353
2354/// Lower `fir.emboxproc` operation. Creates a procedure box.
2355/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct EmboxProcOpConversion : public fir::FIROpConversion<fir::EmboxProcOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Procedure box codegen is not implemented yet: report a TODO diagnostic
    // at the op location instead of silently miscompiling.
    TODO(emboxproc.getLoc(), "fir.emboxproc codegen");
    return mlir::failure();
  }
};
2366
2367// Code shared between insert_value and extract_value Ops.
2368struct ValueOpCommon {
2369 // Translate the arguments pertaining to any multidimensional array to
2370 // row-major order for LLVM-IR.
2371 static void toRowMajor(llvm::SmallVectorImpl<int64_t> &indices,
2372 mlir::Type ty) {
2373 assert(ty && "type is null");
2374 const auto end = indices.size();
2375 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) {
2376 if (auto seq = mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(ty)) {
2377 const auto dim = getDimension(seq);
2378 if (dim > 1) {
2379 auto ub = std::min(i + dim, end);
2380 std::reverse(indices.begin() + i, indices.begin() + ub);
2381 i += dim - 1;
2382 }
2383 ty = getArrayElementType(seq);
2384 } else if (auto st = mlir::dyn_cast<mlir::LLVM::LLVMStructType>(ty)) {
2385 ty = st.getBody()[indices[i]];
2386 } else {
2387 llvm_unreachable("index into invalid type");
2388 }
2389 }
2390 }
2391
2392 static llvm::SmallVector<int64_t>
2393 collectIndices(mlir::ConversionPatternRewriter &rewriter,
2394 mlir::ArrayAttr arrAttr) {
2395 llvm::SmallVector<int64_t> indices;
2396 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) {
2397 if (auto intAttr = mlir::dyn_cast<mlir::IntegerAttr>(*i)) {
2398 indices.push_back(Elt: intAttr.getInt());
2399 } else {
2400 auto fieldName = mlir::cast<mlir::StringAttr>(*i).getValue();
2401 ++i;
2402 auto ty = mlir::cast<mlir::TypeAttr>(*i).getValue();
2403 auto index = mlir::cast<fir::RecordType>(ty).getFieldIndex(fieldName);
2404 indices.push_back(Elt: index);
2405 }
2406 }
2407 return indices;
2408 }
2409
2410private:
2411 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) {
2412 auto eleTy = ty.getElementType();
2413 while (auto arrTy = mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(eleTy))
2414 eleTy = arrTy.getElementType();
2415 return eleTy;
2416 }
2417};
2418
2419namespace {
2420/// Extract a subobject value from an ssa-value of aggregate type
2421struct ExtractValueOpConversion
2422 : public fir::FIROpAndTypeConversion<fir::ExtractValueOp>,
2423 public ValueOpCommon {
2424 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2425
2426 llvm::LogicalResult
2427 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor,
2428 mlir::ConversionPatternRewriter &rewriter) const override {
2429 mlir::ValueRange operands = adaptor.getOperands();
2430 auto indices = collectIndices(rewriter, extractVal.getCoor());
2431 toRowMajor(indices, operands[0].getType());
2432 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(
2433 extractVal, operands[0], indices);
2434 return mlir::success();
2435 }
2436};
2437
2438/// InsertValue is the generalized instruction for the composition of new
2439/// aggregate type values.
2440struct InsertValueOpConversion
2441 : public mlir::OpConversionPattern<fir::InsertValueOp>,
2442 public ValueOpCommon {
2443 using OpConversionPattern::OpConversionPattern;
2444
2445 llvm::LogicalResult
2446 matchAndRewrite(fir::InsertValueOp insertVal, OpAdaptor adaptor,
2447 mlir::ConversionPatternRewriter &rewriter) const override {
2448 mlir::ValueRange operands = adaptor.getOperands();
2449 auto indices = collectIndices(rewriter, insertVal.getCoor());
2450 toRowMajor(indices, operands[0].getType());
2451 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2452 insertVal, operands[0], operands[1], indices);
2453 return mlir::success();
2454 }
2455};
2456
2457/// InsertOnRange inserts a value into a sequence over a range of offsets.
2458struct InsertOnRangeOpConversion
2459 : public fir::FIROpAndTypeConversion<fir::InsertOnRangeOp> {
2460 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2461
2462 // Increments an array of subscripts in a row major fasion.
2463 void incrementSubscripts(llvm::ArrayRef<int64_t> dims,
2464 llvm::SmallVectorImpl<int64_t> &subscripts) const {
2465 for (size_t i = dims.size(); i > 0; --i) {
2466 if (++subscripts[i - 1] < dims[i - 1]) {
2467 return;
2468 }
2469 subscripts[i - 1] = 0;
2470 }
2471 }
2472
2473 llvm::LogicalResult
2474 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
2475 mlir::ConversionPatternRewriter &rewriter) const override {
2476
2477 auto arrayType = adaptor.getSeq().getType();
2478
2479 // Iteratively extract the array dimensions from the type.
2480 llvm::SmallVector<std::int64_t> dims;
2481 mlir::Type type = arrayType;
2482 while (auto t = mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(type)) {
2483 dims.push_back(Elt: t.getNumElements());
2484 type = t.getElementType();
2485 }
2486
2487 // Avoid generating long insert chain that are very slow to fold back
2488 // (which is required in globals when later generating LLVM IR). Attempt to
2489 // fold the inserted element value to an attribute and build an ArrayAttr
2490 // for the resulting array.
2491 if (range.isFullRange()) {
2492 llvm::FailureOr<mlir::Attribute> cst =
2493 fir::tryFoldingLLVMInsertChain(adaptor.getVal(), rewriter);
2494 if (llvm::succeeded(cst)) {
2495 mlir::Attribute dimVal = *cst;
2496 for (auto dim : llvm::reverse(C&: dims)) {
2497 // Use std::vector in case the number of elements is big.
2498 std::vector<mlir::Attribute> elements(dim, dimVal);
2499 dimVal = mlir::ArrayAttr::get(range.getContext(), elements);
2500 }
2501 // Replace insert chain with constant.
2502 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(range, arrayType,
2503 dimVal);
2504 return mlir::success();
2505 }
2506 }
2507
2508 // The inserted value cannot be folded to an attribute, turn the
2509 // insert_range into an llvm.insertvalue chain.
2510 llvm::SmallVector<std::int64_t> lBounds;
2511 llvm::SmallVector<std::int64_t> uBounds;
2512
2513 // Unzip the upper and lower bound and convert to a row major format.
2514 mlir::DenseIntElementsAttr coor = range.getCoor();
2515 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
2516 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
2517 uBounds.push_back(Elt: *i++);
2518 lBounds.push_back(Elt: *i);
2519 }
2520
2521 auto &subscripts = lBounds;
2522 auto loc = range.getLoc();
2523 mlir::Value lastOp = adaptor.getSeq();
2524 mlir::Value insertVal = adaptor.getVal();
2525
2526 while (subscripts != uBounds) {
2527 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
2528 loc, lastOp, insertVal, subscripts);
2529
2530 incrementSubscripts(dims, subscripts);
2531 }
2532
2533 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2534 range, lastOp, insertVal, subscripts);
2535
2536 return mlir::success();
2537 }
2538};
2539} // namespace
2540
2541namespace {
2542/// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
2543/// shifted etc. array.
2544/// (See the static restriction on coordinate_of.) array_coor determines the
2545/// coordinate (location) of a specific element.
struct XArrayCoorOpConversion
    : public fir::FIROpAndTypeConversion<fir::cg::XArrayCoorOp> {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  llvm::LogicalResult
  doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type llvmPtrTy, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {
    auto loc = coor.getLoc();
    mlir::ValueRange operands = adaptor.getOperands();
    unsigned rank = coor.getRank();
    assert(coor.getIndices().size() == rank);
    assert(coor.getShape().empty() || coor.getShape().size() == rank);
    assert(coor.getShift().empty() || coor.getShift().size() == rank);
    assert(coor.getSlice().empty() || coor.getSlice().size() == 3 * rank);
    mlir::Type idxTy = lowerTy().indexType();
    // Positions of each operand group in the flattened operand list.
    unsigned indexOffset = coor.getIndicesOperandIndex();
    unsigned shapeOffset = coor.getShapeOperandIndex();
    unsigned shiftOffset = coor.getShiftOperandIndex();
    unsigned sliceOffset = coor.getSliceOperandIndex();
    auto sliceOps = coor.getSlice().begin();
    mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
    mlir::Value prevExt = one;
    mlir::Value offset = genConstantIndex(loc, idxTy, rewriter, 0);
    const bool isShifted = !coor.getShift().empty();
    const bool isSliced = !coor.getSlice().empty();
    const bool baseIsBoxed =
        mlir::isa<fir::BaseBoxType>(coor.getMemref().getType());
    TypePair baseBoxTyPair =
        baseIsBoxed ? getBoxTypePair(coor.getMemref().getType()) : TypePair{};
    mlir::LLVM::IntegerOverflowFlags nsw =
        mlir::LLVM::IntegerOverflowFlags::nsw;

    // For each dimension of the array, generate the offset calculation.
    for (unsigned i = 0; i < rank; ++i, ++indexOffset, ++shapeOffset,
                  ++shiftOffset, sliceOffset += 3, sliceOps += 3) {
      mlir::Value index =
          integerCast(loc, rewriter, idxTy, operands[indexOffset]);
      mlir::Value lb =
          isShifted ? integerCast(loc, rewriter, idxTy, operands[shiftOffset])
                    : one;
      mlir::Value step = one;
      bool normalSlice = isSliced;
      // Compute zero based index in dimension i of the element, applying
      // potential triplets and lower bounds.
      if (isSliced) {
        // A slice dimension whose upper bound is fir.undef is a degenerate
        // (single element) dimension: no step is applied.
        mlir::Value originalUb = *(sliceOps + 1);
        normalSlice =
            !mlir::isa_and_nonnull<fir::UndefOp>(originalUb.getDefiningOp());
        if (normalSlice)
          step = integerCast(loc, rewriter, idxTy, operands[sliceOffset + 2]);
      }
      auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb, nsw);
      mlir::Value diff =
          rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step, nsw);
      if (normalSlice) {
        // Add the slice lower bound adjustment: (sliceLb - lb).
        mlir::Value sliceLb =
            integerCast(loc, rewriter, idxTy, operands[sliceOffset]);
        auto adj =
            rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb, nsw);
        diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj, nsw);
      }
      // Update the offset given the stride and the zero based index `diff`
      // that was just computed.
      if (baseIsBoxed) {
        // Use stride in bytes from the descriptor.
        mlir::Value stride =
            getStrideFromBox(loc, baseBoxTyPair, operands[0], i, rewriter);
        auto sc =
            rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride, nsw);
        offset =
            rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset, nsw);
      } else {
        // Use stride computed at last iteration.
        auto sc =
            rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt, nsw);
        offset =
            rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset, nsw);
        // Compute next stride assuming contiguity of the base array
        // (in element number).
        auto nextExt = integerCast(loc, rewriter, idxTy, operands[shapeOffset]);
        prevExt = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt,
                                                     nextExt, nsw);
      }
    }

    // Add computed offset to the base address.
    if (baseIsBoxed) {
      // Working with byte offsets. The base address is read from the fir.box.
      // and used in i8* GEP to do the pointer arithmetic.
      mlir::Type byteTy = ::getI8Type(coor.getContext());
      mlir::Value base =
          getBaseAddrFromBox(loc, baseBoxTyPair, operands[0], rewriter);
      llvm::SmallVector<mlir::LLVM::GEPArg> args{offset};
      auto addr = rewriter.create<mlir::LLVM::GEPOp>(loc, llvmPtrTy, byteTy,
                                                     base, args);
      if (coor.getSubcomponent().empty()) {
        rewriter.replaceOp(coor, addr);
        return mlir::success();
      }
      // Cast the element address from void* to the derived type so that the
      // derived type members can be addresses via a GEP using the index of
      // components.
      mlir::Type elementType =
          getLlvmObjectTypeFromBoxType(coor.getMemref().getType());
      while (auto arrayTy =
                 mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(elementType))
        elementType = arrayTy.getElementType();
      args.clear();
      args.push_back(0);
      if (!coor.getLenParams().empty()) {
        // If type parameters are present, then we don't want to use a GEPOp
        // as below, as the LLVM struct type cannot be statically defined.
        TODO(loc, "derived type with type parameters");
      }
      llvm::SmallVector<mlir::Value> indices = convertSubcomponentIndices(
          loc, elementType,
          operands.slice(coor.getSubcomponentOperandIndex(),
                         coor.getSubcomponent().size()));
      args.append(indices.begin(), indices.end());
      rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, llvmPtrTy,
                                                     elementType, addr, args);
      return mlir::success();
    }

    // The array was not boxed, so it must be contiguous. offset is therefore an
    // element offset and the base type is kept in the GEP unless the element
    // type size is itself dynamic.
    mlir::Type objectTy = fir::unwrapRefType(coor.getMemref().getType());
    mlir::Type eleType = fir::unwrapSequenceType(objectTy);
    mlir::Type gepObjectType = convertType(eleType);
    llvm::SmallVector<mlir::LLVM::GEPArg> args;
    if (coor.getSubcomponent().empty()) {
      // No subcomponent.
      if (!coor.getLenParams().empty()) {
        // Type parameters. Adjust element size explicitly.
        auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
        assert(eleTy && "result must be a reference-like type");
        if (fir::characterWithDynamicLen(eleTy)) {
          assert(coor.getLenParams().size() == 1);
          // Scale the element offset by the dynamic character length.
          auto length = integerCast(loc, rewriter, idxTy,
                                    operands[coor.getLenParamsOperandIndex()]);
          offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset,
                                                      length, nsw);
        } else {
          TODO(loc, "compute size of derived type with type parameters");
        }
      }
      args.push_back(offset);
    } else {
      // There are subcomponents.
      args.push_back(offset);
      llvm::SmallVector<mlir::Value> indices = convertSubcomponentIndices(
          loc, gepObjectType,
          operands.slice(coor.getSubcomponentOperandIndex(),
                         coor.getSubcomponent().size()));
      args.append(indices.begin(), indices.end());
    }
    rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(
        coor, llvmPtrTy, gepObjectType, adaptor.getMemref(), args);
    return mlir::success();
  }
};
2708} // namespace
2709
2710/// Convert to (memory) reference to a reference to a subobject.
2711/// The coordinate_of op is a Swiss army knife operation that can be used on
2712/// (memory) references to records, arrays, complex, etc. as well as boxes.
2713/// With unboxed arrays, there is the restriction that the array have a static
2714/// shape in all but the last column.
2715struct CoordinateOpConversion
2716 : public fir::FIROpAndTypeConversion<fir::CoordinateOp> {
2717 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2718
2719 llvm::LogicalResult
2720 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor,
2721 mlir::ConversionPatternRewriter &rewriter) const override {
2722 mlir::ValueRange operands = adaptor.getOperands();
2723
2724 mlir::Location loc = coor.getLoc();
2725 mlir::Value base = operands[0];
2726 mlir::Type baseObjectTy = coor.getBaseType();
2727 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
2728 assert(objectTy && "fir.coordinate_of expects a reference type");
2729 mlir::Type llvmObjectTy = convertType(objectTy);
2730
2731 // Complex type - basically, extract the real or imaginary part
2732 // FIXME: double check why this is done before the fir.box case below.
2733 if (fir::isa_complex(objectTy)) {
2734 mlir::Value gep =
2735 genGEP(loc, llvmObjectTy, rewriter, base, 0, operands[1]);
2736 rewriter.replaceOp(coor, gep);
2737 return mlir::success();
2738 }
2739
2740 // Boxed type - get the base pointer from the box
2741 if (mlir::dyn_cast<fir::BaseBoxType>(baseObjectTy))
2742 return doRewriteBox(coor, operands, loc, rewriter);
2743
2744 // Reference, pointer or a heap type
2745 if (mlir::isa<fir::ReferenceType, fir::PointerType, fir::HeapType>(
2746 baseObjectTy))
2747 return doRewriteRefOrPtr(coor, llvmObjectTy, operands, loc, rewriter);
2748
2749 return rewriter.notifyMatchFailure(
2750 coor, "fir.coordinate_of base operand has unsupported type");
2751 }
2752
2753 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) {
2754 return fir::hasDynamicSize(ty)
2755 ? op.getDefiningOp()
2756 ->getAttrOfType<mlir::IntegerAttr>("field")
2757 .getInt()
2758 : getConstantIntValue(op);
2759 }
2760
2761 static bool hasSubDimensions(mlir::Type type) {
2762 return mlir::isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(type);
2763 }
2764
2765 // Helper structure to analyze the CoordinateOp path and decide if and how
2766 // the GEP should be generated for it.
2767 struct ShapeAnalysis {
2768 bool hasKnownShape;
2769 bool columnIsDeferred;
2770 };
2771
2772 /// Walk the abstract memory layout and determine if the path traverses any
2773 /// array types with unknown shape. Return true iff all the array types have a
2774 /// constant shape along the path.
2775 /// TODO: move the verification logic into the verifier.
2776 static std::optional<ShapeAnalysis>
2777 arraysHaveKnownShape(mlir::Type type, fir::CoordinateOp coor) {
2778 fir::CoordinateIndicesAdaptor indices = coor.getIndices();
2779 auto begin = indices.begin();
2780 bool hasKnownShape = true;
2781 bool columnIsDeferred = false;
2782 for (auto it = begin, end = indices.end(); it != end;) {
2783 if (auto arrTy = mlir::dyn_cast<fir::SequenceType>(type)) {
2784 bool addressingStart = (it == begin);
2785 unsigned arrayDim = arrTy.getDimension();
2786 for (auto dimExtent : llvm::enumerate(arrTy.getShape())) {
2787 if (dimExtent.value() == fir::SequenceType::getUnknownExtent()) {
2788 hasKnownShape = false;
2789 if (addressingStart && dimExtent.index() + 1 == arrayDim) {
2790 // If this point was reached, the raws of the first array have
2791 // constant extents.
2792 columnIsDeferred = true;
2793 } else {
2794 // One of the array dimension that is not the column of the first
2795 // array has dynamic extent. It will not possible to do
2796 // code generation for the CoordinateOp if the base is not a
2797 // fir.box containing the value of that extent.
2798 return ShapeAnalysis{false, false};
2799 }
2800 }
2801 // There may be less operands than the array size if the
2802 // fir.coordinate_of result is not an element but a sub-array.
2803 if (it != end)
2804 ++it;
2805 }
2806 type = arrTy.getEleTy();
2807 continue;
2808 }
2809 if (auto strTy = mlir::dyn_cast<fir::RecordType>(type)) {
2810 auto intAttr = llvm::dyn_cast<mlir::IntegerAttr>(*it);
2811 if (!intAttr) {
2812 mlir::emitError(coor.getLoc(),
2813 "expected field name in fir.coordinate_of");
2814 return std::nullopt;
2815 }
2816 type = strTy.getType(intAttr.getInt());
2817 } else if (auto strTy = mlir::dyn_cast<mlir::TupleType>(type)) {
2818 auto value = llvm::dyn_cast<mlir::Value>(*it);
2819 if (!value) {
2820 mlir::emitError(
2821 coor.getLoc(),
2822 "expected constant value to address tuple in fir.coordinate_of");
2823 return std::nullopt;
2824 }
2825 type = strTy.getType(getConstantIntValue(value));
2826 } else if (auto charType = mlir::dyn_cast<fir::CharacterType>(type)) {
2827 // Addressing character in string. Fortran strings degenerate to arrays
2828 // in LLVM, so they are handled like arrays of characters here.
2829 if (charType.getLen() == fir::CharacterType::unknownLen())
2830 return ShapeAnalysis{.hasKnownShape: false, .columnIsDeferred: true};
2831 type = fir::CharacterType::getSingleton(charType.getContext(),
2832 charType.getFKind());
2833 }
2834 ++it;
2835 }
2836 return ShapeAnalysis{.hasKnownShape: hasKnownShape, .columnIsDeferred: columnIsDeferred};
2837 }
2838
2839private:
2840 llvm::LogicalResult
2841 doRewriteBox(fir::CoordinateOp coor, mlir::ValueRange operands,
2842 mlir::Location loc,
2843 mlir::ConversionPatternRewriter &rewriter) const {
2844 mlir::Type boxObjTy = coor.getBaseType();
2845 assert(mlir::dyn_cast<fir::BaseBoxType>(boxObjTy) &&
2846 "This is not a `fir.box`");
2847 TypePair boxTyPair = getBoxTypePair(boxObjTy);
2848
2849 mlir::Value boxBaseAddr = operands[0];
2850
2851 // 1. SPECIAL CASE (uses `fir.len_param_index`):
2852 // %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
2853 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
2854 // %addr = coordinate_of %box, %lenp
2855 if (coor.getNumOperands() == 2) {
2856 mlir::Operation *coordinateDef =
2857 (*coor.getCoor().begin()).getDefiningOp();
2858 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef))
2859 TODO(loc,
2860 "fir.coordinate_of - fir.len_param_index is not supported yet");
2861 }
2862
2863 // 2. GENERAL CASE:
2864 // 2.1. (`fir.array`)
2865 // %box = ... : !fix.box<!fir.array<?xU>>
2866 // %idx = ... : index
2867 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
2868 // 2.2 (`fir.derived`)
2869 // %box = ... : !fix.box<!fir.type<derived_type{field_1:i32}>>
2870 // %idx = ... : i32
2871 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
2872 // 2.3 (`fir.derived` inside `fir.array`)
2873 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32,
2874 // field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr =
2875 // coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
2876 // 2.4. TODO: Either document or disable any other case that the following
2877 // implementation might convert.
2878 mlir::Value resultAddr =
2879 getBaseAddrFromBox(loc, boxTyPair, boxBaseAddr, rewriter);
2880 // Component Type
2881 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
2882 mlir::Type llvmPtrTy = ::getLlvmPtrType(coor.getContext());
2883 mlir::Type byteTy = ::getI8Type(coor.getContext());
2884 mlir::LLVM::IntegerOverflowFlags nsw =
2885 mlir::LLVM::IntegerOverflowFlags::nsw;
2886
2887 int nextIndexValue = 1;
2888 fir::CoordinateIndicesAdaptor indices = coor.getIndices();
2889 for (auto it = indices.begin(), end = indices.end(); it != end;) {
2890 if (auto arrTy = mlir::dyn_cast<fir::SequenceType>(cpnTy)) {
2891 if (it != indices.begin())
2892 TODO(loc, "fir.array nested inside other array and/or derived type");
2893 // Applies byte strides from the box. Ignore lower bound from box
2894 // since fir.coordinate_of indexes are zero based. Lowering takes care
2895 // of lower bound aspects. This both accounts for dynamically sized
2896 // types and non contiguous arrays.
2897 auto idxTy = lowerTy().indexType();
2898 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0);
2899 unsigned arrayDim = arrTy.getDimension();
2900 for (unsigned dim = 0; dim < arrayDim && it != end; ++dim, ++it) {
2901 mlir::Value stride =
2902 getStrideFromBox(loc, boxTyPair, operands[0], dim, rewriter);
2903 auto sc = rewriter.create<mlir::LLVM::MulOp>(
2904 loc, idxTy, operands[nextIndexValue + dim], stride, nsw);
2905 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off, nsw);
2906 }
2907 nextIndexValue += arrayDim;
2908 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(
2909 loc, llvmPtrTy, byteTy, resultAddr,
2910 llvm::ArrayRef<mlir::LLVM::GEPArg>{off});
2911 cpnTy = arrTy.getEleTy();
2912 } else if (auto recTy = mlir::dyn_cast<fir::RecordType>(cpnTy)) {
2913 auto intAttr = llvm::dyn_cast<mlir::IntegerAttr>(*it);
2914 if (!intAttr)
2915 return mlir::emitError(loc,
2916 "expected field name in fir.coordinate_of");
2917 int fieldIndex = intAttr.getInt();
2918 ++it;
2919 cpnTy = recTy.getType(fieldIndex);
2920 auto llvmRecTy = lowerTy().convertType(recTy);
2921 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(
2922 loc, llvmPtrTy, llvmRecTy, resultAddr,
2923 llvm::ArrayRef<mlir::LLVM::GEPArg>{0, fieldIndex});
2924 } else {
2925 fir::emitFatalError(loc, "unexpected type in coordinate_of");
2926 }
2927 }
2928
2929 rewriter.replaceOp(coor, resultAddr);
2930 return mlir::success();
2931 }
2932
2933 llvm::LogicalResult
2934 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type llvmObjectTy,
2935 mlir::ValueRange operands, mlir::Location loc,
2936 mlir::ConversionPatternRewriter &rewriter) const {
2937 mlir::Type baseObjectTy = coor.getBaseType();
2938
2939 // Component Type
2940 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
2941
2942 const std::optional<ShapeAnalysis> shapeAnalysis =
2943 arraysHaveKnownShape(cpnTy, coor);
2944 if (!shapeAnalysis)
2945 return mlir::failure();
2946
2947 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy)))
2948 return mlir::emitError(
2949 loc, "fir.coordinate_of with a dynamic element size is unsupported");
2950
2951 if (shapeAnalysis->hasKnownShape || shapeAnalysis->columnIsDeferred) {
2952 llvm::SmallVector<mlir::LLVM::GEPArg> offs;
2953 if (shapeAnalysis->hasKnownShape) {
2954 offs.push_back(0);
2955 }
2956 // Else, only the column is `?` and we can simply place the column value
2957 // in the 0-th GEP position.
2958
2959 std::optional<int> dims;
2960 llvm::SmallVector<mlir::Value> arrIdx;
2961 int nextIndexValue = 1;
2962 for (auto index : coor.getIndices()) {
2963 if (auto intAttr = llvm::dyn_cast<mlir::IntegerAttr>(index)) {
2964 // Addressing derived type component.
2965 auto recordType = llvm::dyn_cast<fir::RecordType>(cpnTy);
2966 if (!recordType)
2967 return mlir::emitError(
2968 loc,
2969 "fir.coordinate base type is not consistent with operands");
2970 int fieldId = intAttr.getInt();
2971 cpnTy = recordType.getType(fieldId);
2972 offs.push_back(fieldId);
2973 continue;
2974 }
2975 // Value index (addressing array, tuple, or complex part).
2976 mlir::Value indexValue = operands[nextIndexValue++];
2977 if (auto tupTy = mlir::dyn_cast<mlir::TupleType>(cpnTy)) {
2978 cpnTy = tupTy.getType(getConstantIntValue(indexValue));
2979 offs.push_back(indexValue);
2980 } else {
2981 if (!dims) {
2982 if (auto arrayType = llvm::dyn_cast<fir::SequenceType>(cpnTy)) {
2983 // Starting addressing array or array component.
2984 dims = arrayType.getDimension();
2985 cpnTy = arrayType.getElementType();
2986 }
2987 }
2988 if (dims) {
2989 arrIdx.push_back(indexValue);
2990 if (--(*dims) == 0) {
2991 // Append array range in reverse (FIR arrays are column-major).
2992 offs.append(arrIdx.rbegin(), arrIdx.rend());
2993 arrIdx.clear();
2994 dims.reset();
2995 }
2996 } else {
2997 offs.push_back(indexValue);
2998 }
2999 }
3000 }
3001 // It is possible the fir.coordinate_of result is a sub-array, in which
3002 // case there may be some "unfinished" array indices to reverse and push.
3003 if (!arrIdx.empty())
3004 offs.append(arrIdx.rbegin(), arrIdx.rend());
3005
3006 mlir::Value base = operands[0];
3007 mlir::Value retval = genGEP(loc, llvmObjectTy, rewriter, base, offs);
3008 rewriter.replaceOp(coor, retval);
3009 return mlir::success();
3010 }
3011
3012 return mlir::emitError(
3013 loc, "fir.coordinate_of base operand has unsupported type");
3014 }
3015};
3016
3017/// Convert `fir.field_index`. The conversion depends on whether the size of
3018/// the record is static or dynamic.
struct FieldIndexOpConversion : public fir::FIROpConversion<fir::FieldIndexOp> {
  using FIROpConversion::FIROpConversion;

  // NB: most field references should be resolved by this point
  llvm::LogicalResult
  matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto recTy = mlir::cast<fir::RecordType>(field.getOnType());
    unsigned index = recTy.getFieldIndex(field.getFieldId());

    if (!fir::hasDynamicSize(recTy)) {
      // Derived type has compile-time constant layout. Return index of the
      // component type in the parent type (to be used in GEP).
      rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
                                    field.getLoc(), rewriter, index)});
      return mlir::success();
    }

    // Derived type has a dynamic (runtime-dependent) layout. Call the compiler
    // generated function to determine the byte offset of the field at runtime.
    // This returns a non-constant.
    mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
        field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
    mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
    mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr(
        "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        field, lowerTy().offsetType(), adaptor.getOperands(),
        addLLVMOpBundleAttrs(rewriter, {callAttr, fieldAttr},
                             adaptor.getOperands().size()));
    return mlir::success();
  }

  // Re-Construct the name of the compiler generated method that calculates the
  // offset
  inline static std::string getOffsetMethodName(fir::RecordType recTy,
                                                llvm::StringRef field) {
    return recTy.getName().str() + "P." + field.str() + ".offset";
  }
};
3059
3060/// Convert `fir.end`
struct FirEndOpConversion : public fir::FIROpConversion<fir::FirEndOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Lowering for fir.end is not implemented; the TODO macro reports this.
    TODO(firEnd.getLoc(), "fir.end codegen");
    return mlir::failure();
  }
};
3071
3072/// Lower `fir.type_desc` to a global addr.
3073struct TypeDescOpConversion : public fir::FIROpConversion<fir::TypeDescOp> {
3074 using FIROpConversion::FIROpConversion;
3075
3076 llvm::LogicalResult
3077 matchAndRewrite(fir::TypeDescOp typeDescOp, OpAdaptor adaptor,
3078 mlir::ConversionPatternRewriter &rewriter) const override {
3079 mlir::Type inTy = typeDescOp.getInType();
3080 assert(mlir::isa<fir::RecordType>(inTy) && "expecting fir.type");
3081 auto recordType = mlir::dyn_cast<fir::RecordType>(inTy);
3082 auto module = typeDescOp.getOperation()->getParentOfType<mlir::ModuleOp>();
3083 mlir::Value typeDesc = getTypeDescriptor(
3084 module, rewriter, typeDescOp.getLoc(), recordType, this->options);
3085 rewriter.replaceOp(typeDescOp, typeDesc);
3086 return mlir::success();
3087 }
3088};
3089
3090/// Lower `fir.has_value` operation to `llvm.return` operation.
3091struct HasValueOpConversion
3092 : public mlir::OpConversionPattern<fir::HasValueOp> {
3093 using OpConversionPattern::OpConversionPattern;
3094
3095 llvm::LogicalResult
3096 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
3097 mlir::ConversionPatternRewriter &rewriter) const override {
3098 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op,
3099 adaptor.getOperands());
3100 return mlir::success();
3101 }
3102};
3103
#ifndef NDEBUG
// Check if attr's type is compatible with ty.
//
// This is done by comparing attr's element type, converted to LLVM type,
// with ty's element type.
//
// Only integer and floating point (including complex) attributes are
// supported. Also, attr is expected to have a TensorType and ty is expected
// to be of LLVMArrayType. If any of the previous conditions is false, then
// the specified attr and ty are not supported by this function and are
// assumed to be compatible.
static inline bool attributeTypeIsCompatible(mlir::MLIRContext *ctx,
                                             mlir::Attribute attr,
                                             mlir::Type ty) {
  // Get attr's LLVM element type. A missing or unsupported attribute is
  // treated as compatible (vacuously true).
  if (!attr)
    return true;
  auto intOrFpEleAttr = mlir::dyn_cast<mlir::DenseIntOrFPElementsAttr>(attr);
  if (!intOrFpEleAttr)
    return true;
  auto tensorTy = mlir::dyn_cast<mlir::TensorType>(intOrFpEleAttr.getType());
  if (!tensorTy)
    return true;
  mlir::Type attrEleTy =
      mlir::LLVMTypeConverter(ctx).convertType(tensorTy.getElementType());

  // Get ty's element type, peeling off any nesting of LLVM array types to
  // reach the innermost scalar element type.
  auto arrTy = mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(ty);
  if (!arrTy)
    return true;
  mlir::Type eleTy = arrTy.getElementType();
  while ((arrTy = mlir::dyn_cast<mlir::LLVM::LLVMArrayType>(eleTy)))
    eleTy = arrTy.getElementType();

  return attrEleTy == eleTy;
}
#endif
3141
3142/// Lower `fir.global` operation to `llvm.global` operation.
3143/// `fir.insert_on_range` operations are replaced with constant dense attribute
3144/// if they are applied on the full range.
struct GlobalOpConversion : public fir::FIROpConversion<fir::GlobalOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {

    // Collect debug-info global variable expressions attached to the fused
    // location's metadata so they can be carried over onto llvm.mlir.global.
    llvm::SmallVector<mlir::Attribute> dbgExprs;

    if (auto fusedLoc = mlir::dyn_cast<mlir::FusedLoc>(global.getLoc())) {
      if (auto gvExprAttr = mlir::dyn_cast_if_present<mlir::ArrayAttr>(
              fusedLoc.getMetadata())) {
        for (auto attr : gvExprAttr.getAsRange<mlir::Attribute>())
          if (auto dbgAttr =
                  mlir::dyn_cast<mlir::LLVM::DIGlobalVariableExpressionAttr>(
                      attr))
            dbgExprs.push_back(dbgAttr);
      }
    }

    // Box-typed globals are lowered as the descriptor struct itself rather
    // than the default conversion of the box type.
    auto tyAttr = convertType(global.getType());
    if (auto boxType = mlir::dyn_cast<fir::BaseBoxType>(global.getType()))
      tyAttr = this->lowerTy().convertBoxTypeAsStruct(boxType);
    auto loc = global.getLoc();
    mlir::Attribute initAttr = global.getInitVal().value_or(mlir::Attribute());
    // Debug-only sanity check that the initializer matches the lowered type.
    assert(attributeTypeIsCompatible(global.getContext(), initAttr, tyAttr));
    auto linkage = convertLinkage(global.getLinkName());
    auto isConst = global.getConstant().has_value();
    mlir::SymbolRefAttr comdat;
    llvm::ArrayRef<mlir::NamedAttribute> attrs;
    auto g = rewriter.create<mlir::LLVM::GlobalOp>(
        loc, tyAttr, isConst, linkage, global.getSymName(), initAttr, 0,
        getGlobalAddressSpace(rewriter), false, false, comdat, attrs, dbgExprs);

    if (global.getAlignment() && *global.getAlignment() > 0)
      g.setAlignment(*global.getAlignment());

    auto module = global->getParentOfType<mlir::ModuleOp>();
    auto gpuMod = global->getParentOfType<mlir::gpu::GPUModuleOp>();
    // Add comdat if necessary (linkonce/linkonce_odr on COMDAT-capable
    // targets, but not inside GPU modules).
    if (fir::getTargetTriple(module).supportsCOMDAT() &&
        (linkage == mlir::LLVM::Linkage::Linkonce ||
         linkage == mlir::LLVM::Linkage::LinkonceODR) &&
        !gpuMod) {
      addComdat(g, rewriter, module);
    }

    // Apply all non-Fir::GlobalOp attributes to the LLVM::GlobalOp, preserving
    // them; whilst taking care not to apply attributes that are lowered in
    // other ways.
    llvm::SmallDenseSet<llvm::StringRef> elidedAttrsSet(
        global.getAttributeNames().begin(), global.getAttributeNames().end());
    for (auto &attr : global->getAttrs())
      if (!elidedAttrsSet.contains(attr.getName().strref()))
        g->setAttr(attr.getName(), attr.getValue());

    auto &gr = g.getInitializerRegion();
    rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end());
    if (!gr.empty()) {
      // Replace insert_on_range with a constant dense attribute if the
      // initialization is on the full range.
      auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
      for (auto insertOp : insertOnRangeOps) {
        if (insertOp.isFullRange()) {
          auto seqTyAttr = convertType(insertOp.getType());
          auto *op = insertOp.getVal().getDefiningOp();
          auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
          if (!constant) {
            // The inserted value may be hidden behind a fir.convert; look
            // through it for the underlying arith.constant.
            auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
            if (!convertOp)
              continue;
            constant = mlir::cast<mlir::arith::ConstantOp>(
                convertOp.getValue().getDefiningOp());
          }
          // Splat the scalar constant over the whole range as a dense attr.
          mlir::Type vecType = mlir::VectorType::get(
              insertOp.getType().getShape(), constant.getType());
          auto denseAttr = mlir::DenseElementsAttr::get(
              mlir::cast<mlir::ShapedType>(vecType), constant.getValue());
          rewriter.setInsertionPointAfter(insertOp);
          rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
              insertOp, seqTyAttr, denseAttr);
        }
      }
    }

    // CUDA Fortran shared-memory globals are placed in the NVVM shared
    // memory address space.
    if (global.getDataAttr() &&
        *global.getDataAttr() == cuf::DataAttribute::Shared)
      g.setAddrSpace(mlir::NVVM::NVVMMemorySpace::kSharedMemorySpace);

    rewriter.eraseOp(global);
    return mlir::success();
  }

  // TODO: String comparisons should be avoided. Replace linkName with an
  // enumeration.
  mlir::LLVM::Linkage
  convertLinkage(std::optional<llvm::StringRef> optLinkage) const {
    if (optLinkage) {
      auto name = *optLinkage;
      if (name == "internal")
        return mlir::LLVM::Linkage::Internal;
      if (name == "linkonce")
        return mlir::LLVM::Linkage::Linkonce;
      if (name == "linkonce_odr")
        return mlir::LLVM::Linkage::LinkonceODR;
      if (name == "common")
        return mlir::LLVM::Linkage::Common;
      if (name == "weak")
        return mlir::LLVM::Linkage::Weak;
    }
    // Absent or unrecognized linkage defaults to external.
    return mlir::LLVM::Linkage::External;
  }

private:
  // Put `global` into a comdat "any" selector group named after its symbol,
  // creating the module-level comdat op on first use.
  static void addComdat(mlir::LLVM::GlobalOp &global,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::ModuleOp module) {
    const char *comdatName = "__llvm_comdat";
    mlir::LLVM::ComdatOp comdatOp =
        module.lookupSymbol<mlir::LLVM::ComdatOp>(comdatName);
    if (!comdatOp) {
      comdatOp =
          rewriter.create<mlir::LLVM::ComdatOp>(module.getLoc(), comdatName);
    }
    // Nothing to do if a selector for this symbol already exists.
    if (auto select = comdatOp.lookupSymbol<mlir::LLVM::ComdatSelectorOp>(
            global.getSymName()))
      return;
    mlir::OpBuilder::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToEnd(&comdatOp.getBody().back());
    auto selectorOp = rewriter.create<mlir::LLVM::ComdatSelectorOp>(
        comdatOp.getLoc(), global.getSymName(),
        mlir::LLVM::comdat::Comdat::Any);
    global.setComdatAttr(mlir::SymbolRefAttr::get(
        rewriter.getContext(), comdatName,
        mlir::FlatSymbolRefAttr::get(selectorOp.getSymNameAttr())));
  }
};
3282
3283/// `fir.load` --> `llvm.load`
struct LoadOpConversion : public fir::FIROpConversion<fir::LoadOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {

    mlir::Type llvmLoadTy = convertObjectType(load.getType());
    const bool isVolatile = fir::isa_volatile_type(load.getMemref().getType());
    if (auto boxTy = mlir::dyn_cast<fir::BaseBoxType>(load.getType())) {
      // fir.box is a special case because it is considered an ssa value in
      // fir, but it is lowered as a pointer to a descriptor. So
      // fir.ref<fir.box> and fir.box end up being the same llvm types and
      // loading a fir.ref<fir.box> is implemented as taking a snapshot of the
      // descriptor value into a new descriptor temp.
      auto inputBoxStorage = adaptor.getOperands()[0];
      mlir::Value newBoxStorage;
      mlir::Location loc = load.getLoc();
      // If the input descriptor came from a CUF managed-memory allocation (or
      // an OpenACC deviceptr lookup), the snapshot storage must be allocated
      // the same way instead of on the stack.
      if (auto callOp = mlir::dyn_cast_or_null<mlir::LLVM::CallOp>(
              inputBoxStorage.getDefiningOp())) {
        if (callOp.getCallee() &&
            ((*callOp.getCallee())
                 .starts_with(RTNAME_STRING(CUFAllocDescriptor)) ||
             (*callOp.getCallee()).starts_with("__tgt_acc_get_deviceptr"))) {
          // CUDA Fortran local descriptor are allocated in managed memory. So
          // new storage must be allocated the same way.
          auto mod = load->getParentOfType<mlir::ModuleOp>();
          newBoxStorage =
              genCUFAllocDescriptor(loc, rewriter, mod, boxTy, lowerTy());
        }
      }
      // Default: stack temporary for the descriptor snapshot.
      if (!newBoxStorage)
        newBoxStorage = genAllocaAndAddrCastWithType(loc, llvmLoadTy,
                                                     defaultAlign, rewriter);

      // Copy the descriptor bytes; the size may be runtime-dependent.
      TypePair boxTypePair{boxTy, llvmLoadTy};
      mlir::Value boxSize =
          computeBoxSize(loc, boxTypePair, inputBoxStorage, rewriter);
      auto memcpy = rewriter.create<mlir::LLVM::MemcpyOp>(
          loc, newBoxStorage, inputBoxStorage, boxSize, isVolatile);

      // Preserve explicit TBAA tags if present; otherwise attach a default
      // tag for the box type.
      if (std::optional<mlir::ArrayAttr> optionalTag = load.getTbaa())
        memcpy.setTBAATags(*optionalTag);
      else
        attachTBAATag(memcpy, boxTy, boxTy, nullptr);
      rewriter.replaceOp(load, newBoxStorage);
    } else {
      // Plain (non-box) load: lower directly to llvm.load, carrying over the
      // op attributes and volatility.
      mlir::LLVM::LoadOp loadOp = rewriter.create<mlir::LLVM::LoadOp>(
          load.getLoc(), llvmLoadTy, adaptor.getOperands(), load->getAttrs());
      loadOp.setVolatile_(isVolatile);
      if (std::optional<mlir::ArrayAttr> optionalTag = load.getTbaa())
        loadOp.setTBAATags(*optionalTag);
      else
        attachTBAATag(loadOp, load.getType(), load.getType(), nullptr);
      rewriter.replaceOp(load, loadOp.getResult());
    }
    return mlir::success();
  }
};
3343
// Erase `fir.local` / `fir.declare_reduction` specifier ops during LLVM
// lowering; by this stage they must have no remaining uses (see assert below).
template <typename OpTy>
struct DoConcurrentSpecifierOpConversion : public fir::FIROpConversion<OpTy> {
  using fir::FIROpConversion<OpTy>::FIROpConversion;
  llvm::LogicalResult
  matchAndRewrite(OpTy specifier, typename OpTy::Adaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
#ifdef EXPENSIVE_CHECKS
    auto uses = mlir::SymbolTable::getSymbolUses(
        specifier, specifier->getParentOfType<mlir::ModuleOp>());

    // `fir.local|fir.declare_reduction` ops are not supposed to have any uses
    // at this point (i.e. during lowering to LLVM). In case of serialization,
    // the `fir.do_concurrent` users are expected to have been lowered to
    // `fir.do_loop` nests. In case of parallelization, the `fir.do_concurrent`
    // users are expected to have been lowered to the target parallel model
    // (e.g. OpenMP).
    assert(uses && uses->empty());
#endif

    rewriter.eraseOp(specifier);
    return mlir::success();
  }
};
3367
3368/// Lower `fir.no_reassoc` to LLVM IR dialect.
3369/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
3370/// math flags?
3371struct NoReassocOpConversion : public fir::FIROpConversion<fir::NoReassocOp> {
3372 using FIROpConversion::FIROpConversion;
3373
3374 llvm::LogicalResult
3375 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
3376 mlir::ConversionPatternRewriter &rewriter) const override {
3377 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
3378 return mlir::success();
3379 }
3380};
3381
3382static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
3383 std::optional<mlir::ValueRange> destOps,
3384 mlir::ConversionPatternRewriter &rewriter,
3385 mlir::Block *newBlock) {
3386 if (destOps)
3387 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, *destOps, newBlock,
3388 mlir::ValueRange());
3389 else
3390 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
3391}
3392
3393template <typename A, typename B>
3394static void genBrOp(A caseOp, mlir::Block *dest, std::optional<B> destOps,
3395 mlir::ConversionPatternRewriter &rewriter) {
3396 if (destOps)
3397 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, *destOps, dest);
3398 else
3399 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, B{}, dest);
3400}
3401
// Emit one rung of the select_case if-then-else ladder: branch to `dest` if
// `cmp` holds, otherwise continue in a fresh block where the next comparison
// will be emitted. Leaves the insertion point in that fresh block.
static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              std::optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  // createBlock moves the insertion point, so restore it before branching.
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  rewriter.setInsertionPointToEnd(newBlock);
}
3412
3413/// Conversion of `fir.select_case`
3414///
3415/// The `fir.select_case` operation is converted to a if-then-else ladder.
3416/// Depending on the case condition type, one or several comparison and
3417/// conditional branching can be generated.
3418///
3419/// A point value case such as `case(4)`, a lower bound case such as
3420/// `case(5:)` or an upper bound case such as `case(:3)` are converted to a
3421/// simple comparison between the selector value and the constant value in the
3422/// case. The block associated with the case condition is then executed if
3423/// the comparison succeed otherwise it branch to the next block with the
3424/// comparison for the next case conditon.
3425///
3426/// A closed interval case condition such as `case(7:10)` is converted with a
3427/// first comparison and conditional branching for the lower bound. If
3428/// successful, it branch to a second block with the comparison for the
3429/// upper bound in the same case condition.
3430///
3431/// TODO: lowering of CHARACTER type cases is not handled yet.
struct SelectCaseOpConversion : public fir::FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (mlir::isa<fir::CharacterType>(ty)) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return mlir::failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    // Emit one ladder rung per case condition; each rung leaves the insertion
    // point in the block where the next comparison is emitted.
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      std::optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      std::optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Attribute attr = cases[t];
      assert(mlir::isa<mlir::UnitAttr>(attr) || cmpOps.has_value());
      // case(n): selector == n
      if (mlir::isa<fir::PointIntervalAttr>(attr)) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, cmpOps->front());
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      // case(n:): n <= selector
      if (mlir::isa<fir::LowerBoundAttr>(attr)) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, cmpOps->front(), selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      // case(:n): selector <= n
      if (mlir::isa<fir::UpperBoundAttr>(attr)) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, cmpOps->front());
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      // case(lo:hi): two comparisons; the second one is emitted in its own
      // block reached only when the lower-bound check succeeds.
      if (mlir::isa<fir::ClosedIntervalAttr>(attr)) {
        mlir::Value caseArg0 = *cmpOps->begin();
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg0, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp0, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg1 = *(cmpOps->begin() + 1);
        auto cmp1 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg1);
        genCondBrOp(loc, cmp1, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      // Default case: unconditional branch; must be the last condition.
      assert(mlir::isa<mlir::UnitAttr>(attr));
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return mlir::success();
  }
};
3498
3499/// Helper function for converting select ops. This function converts the
3500/// signature of the given block. If the new block signature is different from
3501/// `expectedTypes`, returns "failure".
static llvm::FailureOr<mlir::Block *>
getConvertedBlock(mlir::ConversionPatternRewriter &rewriter,
                  const mlir::TypeConverter *converter,
                  mlir::Operation *branchOp, mlir::Block *block,
                  mlir::TypeRange expectedTypes) {
  assert(converter && "expected non-null type converter");
  assert(!block->isEntryBlock() && "entry blocks have no predecessors");

  // There is nothing to do if the types already match.
  if (block->getArgumentTypes() == expectedTypes)
    return block;

  // Compute the new block argument types and convert the block.
  std::optional<mlir::TypeConverter::SignatureConversion> conversion =
      converter->convertBlockSignature(block);
  if (!conversion)
    return rewriter.notifyMatchFailure(branchOp,
                                       "could not compute block signature");
  if (expectedTypes != conversion->getConvertedTypes())
    return rewriter.notifyMatchFailure(
        branchOp,
        "mismatch between adaptor operand types and computed block signature");
  // Rewrites the block in place; returns the (possibly new) block pointer.
  return rewriter.applySignatureConversion(block, *conversion, converter);
}
3526
3527template <typename OP>
3528static llvm::LogicalResult
3529selectMatchAndRewrite(const fir::LLVMTypeConverter &lowering, OP select,
3530 typename OP::Adaptor adaptor,
3531 mlir::ConversionPatternRewriter &rewriter,
3532 const mlir::TypeConverter *converter) {
3533 unsigned conds = select.getNumConditions();
3534 auto cases = select.getCases().getValue();
3535 mlir::Value selector = adaptor.getSelector();
3536 auto loc = select.getLoc();
3537 assert(conds > 0 && "select must have cases");
3538
3539 llvm::SmallVector<mlir::Block *> destinations;
3540 llvm::SmallVector<mlir::ValueRange> destinationsOperands;
3541 mlir::Block *defaultDestination;
3542 mlir::ValueRange defaultOperands;
3543 llvm::SmallVector<int32_t> caseValues;
3544
3545 for (unsigned t = 0; t != conds; ++t) {
3546 mlir::Block *dest = select.getSuccessor(t);
3547 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
3548 const mlir::Attribute &attr = cases[t];
3549 if (auto intAttr = mlir::dyn_cast<mlir::IntegerAttr>(attr)) {
3550 destinationsOperands.push_back(destOps ? *destOps : mlir::ValueRange{});
3551 auto convertedBlock =
3552 getConvertedBlock(rewriter, converter, select, dest,
3553 mlir::TypeRange(destinationsOperands.back()));
3554 if (mlir::failed(convertedBlock))
3555 return mlir::failure();
3556 destinations.push_back(*convertedBlock);
3557 caseValues.push_back(Elt: intAttr.getInt());
3558 continue;
3559 }
3560 assert(mlir::dyn_cast_or_null<mlir::UnitAttr>(attr));
3561 assert((t + 1 == conds) && "unit must be last");
3562 defaultOperands = destOps ? *destOps : mlir::ValueRange{};
3563 auto convertedBlock = getConvertedBlock(rewriter, converter, select, dest,
3564 mlir::TypeRange(defaultOperands));
3565 if (mlir::failed(convertedBlock))
3566 return mlir::failure();
3567 defaultDestination = *convertedBlock;
3568 }
3569
3570 // LLVM::SwitchOp takes a i32 type for the selector.
3571 if (select.getSelector().getType() != rewriter.getI32Type())
3572 selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
3573 selector);
3574
3575 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
3576 select, selector,
3577 /*defaultDestination=*/defaultDestination,
3578 /*defaultOperands=*/defaultOperands,
3579 /*caseValues=*/caseValues,
3580 /*caseDestinations=*/destinations,
3581 /*caseOperands=*/destinationsOperands,
3582 /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
3583 return mlir::success();
3584}
3585
3586/// conversion of fir::SelectOp to an if-then-else ladder
// Thin wrapper dispatching fir.select to the shared selectMatchAndRewrite.
struct SelectOpConversion : public fir::FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    return selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor,
                                                rewriter, getTypeConverter());
  }
};
3597
3598/// conversion of fir::SelectRankOp to an if-then-else ladder
// Thin wrapper dispatching fir.select_rank to the shared selectMatchAndRewrite.
struct SelectRankOpConversion : public fir::FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    return selectMatchAndRewrite<fir::SelectRankOp>(
        lowerTy(), op, adaptor, rewriter, getTypeConverter());
  }
};
3609
3610/// Lower `fir.select_type` to LLVM IR dialect.
struct SelectTypeOpConversion : public fir::FIROpConversion<fir::SelectTypeOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // fir.select_type must be rewritten by an earlier pass; reaching codegen
    // with one is a hard error.
    mlir::emitError(select.getLoc(),
                    "fir.select_type should have already been converted");
    return mlir::failure();
  }
};
3622
3623/// `fir.store` --> `llvm.store`
struct StoreOpConversion : public fir::FIROpConversion<fir::StoreOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = store.getLoc();
    mlir::Type storeTy = store.getValue().getType();
    mlir::Value llvmValue = adaptor.getValue();
    mlir::Value llvmMemref = adaptor.getMemref();
    mlir::LLVM::AliasAnalysisOpInterface newOp;
    // Volatility of either side makes the whole store volatile.
    const bool isVolatile =
        fir::isa_volatile_type(store.getMemref().getType()) ||
        fir::isa_volatile_type(store.getValue().getType());
    if (auto boxTy = mlir::dyn_cast<fir::BaseBoxType>(storeTy)) {
      mlir::Type llvmBoxTy = lowerTy().convertBoxTypeAsStruct(boxTy);
      // Always use memcpy because LLVM is not as effective at optimizing
      // aggregate loads/stores as it is optimizing memcpy.
      TypePair boxTypePair{boxTy, llvmBoxTy};
      mlir::Value boxSize =
          computeBoxSize(loc, boxTypePair, llvmValue, rewriter);
      newOp = rewriter.create<mlir::LLVM::MemcpyOp>(loc, llvmMemref, llvmValue,
                                                    boxSize, isVolatile);
    } else {
      // Plain store: lower directly to llvm.store.
      mlir::LLVM::StoreOp storeOp =
          rewriter.create<mlir::LLVM::StoreOp>(loc, llvmValue, llvmMemref);

      if (isVolatile)
        storeOp.setVolatile_(true);

      if (store.getNontemporal())
        storeOp.setNontemporal(true);

      newOp = storeOp;
    }
    // Preserve explicit TBAA tags when present; otherwise attach a default
    // tag derived from the stored type.
    if (std::optional<mlir::ArrayAttr> optionalTag = store.getTbaa())
      newOp.setTBAATags(*optionalTag);
    else
      attachTBAATag(newOp, storeTy, storeTy, nullptr);
    rewriter.eraseOp(store);
    return mlir::success();
  }
};
3667
3668/// `fir.copy` --> `llvm.memcpy` or `llvm.memmove`
struct CopyOpConversion : public fir::FIROpConversion<fir::CopyOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::CopyOp copy, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = copy.getLoc();
    // Volatility of either endpoint makes the whole copy volatile.
    const bool isVolatile =
        fir::isa_volatile_type(copy.getSource().getType()) ||
        fir::isa_volatile_type(copy.getDestination().getType());
    mlir::Value llvmSource = adaptor.getSource();
    mlir::Value llvmDestination = adaptor.getDestination();
    mlir::Type i64Ty = mlir::IntegerType::get(rewriter.getContext(), 64);
    // The byte count is the size of one element of the (dereferenced) type.
    mlir::Type copyTy = fir::unwrapRefType(copy.getSource().getType());
    mlir::Value copySize = genTypeStrideInBytes(
        loc, i64Ty, rewriter, convertType(copyTy), getDataLayout());

    // no_overlap permits memcpy; otherwise memmove must be used since the
    // ranges may alias.
    mlir::LLVM::AliasAnalysisOpInterface newOp;
    if (copy.getNoOverlap())
      newOp = rewriter.create<mlir::LLVM::MemcpyOp>(
          loc, llvmDestination, llvmSource, copySize, isVolatile);
    else
      newOp = rewriter.create<mlir::LLVM::MemmoveOp>(
          loc, llvmDestination, llvmSource, copySize, isVolatile);

    // TODO: propagate TBAA once FirAliasTagOpInterface added to CopyOp.
    attachTBAATag(newOp, copyTy, copyTy, nullptr);
    rewriter.eraseOp(copy);
    return mlir::success();
  }
};
3700
3701namespace {
3702
3703/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
3704/// the character buffer and one for the buffer length.
3705struct UnboxCharOpConversion : public fir::FIROpConversion<fir::UnboxCharOp> {
3706 using FIROpConversion::FIROpConversion;
3707
3708 llvm::LogicalResult
3709 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
3710 mlir::ConversionPatternRewriter &rewriter) const override {
3711 mlir::Type lenTy = convertType(unboxchar.getType(1));
3712 mlir::Value tuple = adaptor.getOperands()[0];
3713
3714 mlir::Location loc = unboxchar.getLoc();
3715 mlir::Value ptrToBuffer =
3716 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, tuple, 0);
3717
3718 auto len = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, tuple, 1);
3719 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);
3720
3721 rewriter.replaceOp(unboxchar,
3722 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
3723 return mlir::success();
3724 }
3725};
3726
3727/// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
3728/// components.
3729/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct UnboxProcOpConversion : public fir::FIROpConversion<fir::UnboxProcOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Lowering not implemented yet; the TODO macro reports this.
    TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
    return mlir::failure();
  }
};
3740
3741/// convert to LLVM IR dialect `undef`
struct UndefOpConversion : public fir::FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    if (mlir::isa<fir::DummyScopeType>(undef.getType())) {
      // Dummy scoping is used for Fortran analyses like AA. Once it gets to
      // pre-codegen rewrite it is erased and a fir.undef is created to
      // feed to the fir declare operation. Thus, during codegen, we can
      // simply erase it as it is no longer used.
      rewriter.eraseOp(undef);
      return mlir::success();
    }
    // All other types lower directly to llvm.mlir.undef.
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return mlir::success();
  }
};
3761
3762struct ZeroOpConversion : public fir::FIROpConversion<fir::ZeroOp> {
3763 using FIROpConversion::FIROpConversion;
3764
3765 llvm::LogicalResult
3766 matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
3767 mlir::ConversionPatternRewriter &rewriter) const override {
3768 mlir::Type ty = convertType(zero.getType());
3769 rewriter.replaceOpWithNewOp<mlir::LLVM::ZeroOp>(zero, ty);
3770 return mlir::success();
3771 }
3772};
3773
3774/// `fir.unreachable` --> `llvm.unreachable`
struct UnreachableOpConversion
    : public fir::FIROpConversion<fir::UnreachableOp> {
  using FIROpConversion::FIROpConversion;

  // Direct one-to-one lowering; llvm.unreachable takes no operands.
  llvm::LogicalResult
  matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
    return mlir::success();
  }
};
3786
/// `fir.is_present %arg` -->
/// ```
/// %0 = llvm.mlir.constant(0 : i64)
/// %1 = llvm.ptrtoint %arg
/// %2 = llvm.icmp "ne" %1, %0 : i64
/// ```
struct IsPresentOpConversion : public fir::FIROpConversion<fir::IsPresentOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Location loc = isPresent.getLoc();
    auto ptr = adaptor.getOperands()[0];

    if (mlir::isa<fir::BoxCharType>(isPresent.getVal().getType())) {
      // A fir.boxchar converts to an LLVM struct value; the presence test
      // only needs the address stored in its first member, so extract it.
      [[maybe_unused]] auto structTy =
          mlir::cast<mlir::LLVM::LLVMStructType>(ptr.getType());
      assert(!structTy.isOpaque() && !structTy.getBody().empty());

      ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ptr, 0);
    }
    // An optional argument is "present" iff its address is non-null.
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
    auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);

    return mlir::success();
  }
};
3819
3820/// Create value signaling an absent optional argument in a call, e.g.
3821/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.zero : !llvm.ptr<i64>`
3822struct AbsentOpConversion : public fir::FIROpConversion<fir::AbsentOp> {
3823 using FIROpConversion::FIROpConversion;
3824
3825 llvm::LogicalResult
3826 matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
3827 mlir::ConversionPatternRewriter &rewriter) const override {
3828 mlir::Type ty = convertType(absent.getType());
3829 rewriter.replaceOpWithNewOp<mlir::LLVM::ZeroOp>(absent, ty);
3830 return mlir::success();
3831 }
3832};
3833
3834//
3835// Primitive operations on Complex types
3836//
3837
3838template <typename OPTY>
3839static inline mlir::LLVM::FastmathFlagsAttr getLLVMFMFAttr(OPTY op) {
3840 return mlir::LLVM::FastmathFlagsAttr::get(
3841 op.getContext(),
3842 mlir::arith::convertArithFastMathFlagsToLLVM(op.getFastmath()));
3843}
3844
/// Generate inline code for complex addition/subtraction.
///
/// \p LLVMOP selects the component-wise operation (fadd or fsub), \p sumop is
/// the FIR op being lowered (used for its location, type, and fastmath
/// flags), and \p opnds holds the two already-converted complex operands.
/// Returns the final insertvalue op producing the result struct.
template <typename LLVMOP, typename OPTY>
static mlir::LLVM::InsertValueOp
complexSum(OPTY sumop, mlir::ValueRange opnds,
           mlir::ConversionPatternRewriter &rewriter,
           const fir::LLVMTypeConverter &lowering) {
  mlir::LLVM::FastmathFlagsAttr fmf = getLLVMFMFAttr(sumop);
  mlir::Value a = opnds[0];
  mlir::Value b = opnds[1];
  auto loc = sumop.getLoc();
  mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType()));
  mlir::Type ty = lowering.convertType(sumop.getType());
  // Split both operands into their (real, imaginary) components.
  auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, a, 0);
  auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, a, 1);
  auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, b, 0);
  auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, b, 1);
  // Apply LLVMOP component-wise, then rebuild the complex struct value.
  auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1, fmf);
  auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1, fmf);
  auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
  auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, r0, rx, 0);
  return rewriter.create<mlir::LLVM::InsertValueOp>(loc, r1, ry, 1);
}
3867} // namespace
3868
3869namespace {
3870struct AddcOpConversion : public fir::FIROpConversion<fir::AddcOp> {
3871 using FIROpConversion::FIROpConversion;
3872
3873 llvm::LogicalResult
3874 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor,
3875 mlir::ConversionPatternRewriter &rewriter) const override {
3876 // given: (x + iy) + (x' + iy')
3877 // result: (x + x') + i(y + y')
3878 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(),
3879 rewriter, lowerTy());
3880 rewriter.replaceOp(addc, r.getResult());
3881 return mlir::success();
3882 }
3883};
3884
3885struct SubcOpConversion : public fir::FIROpConversion<fir::SubcOp> {
3886 using FIROpConversion::FIROpConversion;
3887
3888 llvm::LogicalResult
3889 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor,
3890 mlir::ConversionPatternRewriter &rewriter) const override {
3891 // given: (x + iy) - (x' + iy')
3892 // result: (x - x') + i(y - y')
3893 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(),
3894 rewriter, lowerTy());
3895 rewriter.replaceOp(subc, r.getResult());
3896 return mlir::success();
3897 }
3898};
3899
/// Inlined complex multiply
struct MulcOpConversion : public fir::FIROpConversion<fir::MulcOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // TODO: Can we use a call to __muldc3 ?
    // given: (x + iy) * (x' + iy')
    // result: (xx'-yy')+i(xy'+yx')
    mlir::LLVM::FastmathFlagsAttr fmf = getLLVMFMFAttr(mulc);
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = mulc.getLoc();
    mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType()));
    mlir::Type ty = convertType(mulc.getType());
    // Decompose both operands into real (x) and imaginary (y) parts.
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, a, 0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, a, 1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, b, 0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, b, 1);
    // Form the four partial products, then combine them per the formula
    // above; all ops inherit the original op's fastmath flags.
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1, fmf);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1, fmf);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1, fmf);
    auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx, fmf);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1, fmf);
    auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy, fmf);
    // Pack the real (rr) and imaginary (ri) results into the result struct.
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ra, rr, 0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, r1, ri, 1);
    rewriter.replaceOp(mulc, r0.getResult());
    return mlir::success();
  }
};
3933
/// Inlined complex division
struct DivcOpConversion : public fir::FIROpConversion<fir::DivcOp> {
  using FIROpConversion::FIROpConversion;

  llvm::LogicalResult
  matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // TODO: Can we use a call to __divdc3 instead?
    // Just generate inline code for now.
    // given: (x + iy) / (x' + iy')
    // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
    mlir::LLVM::FastmathFlagsAttr fmf = getLLVMFMFAttr(divc);
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    // Decompose both operands into real (x) and imaginary (y) parts.
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, a, 0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, a, 1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, b, 0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, b, 1);
    // Partial products for the numerators and the denominator d.
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1, fmf);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1, fmf);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1, fmf);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1, fmf);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1, fmf);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1, fmf);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1, fmf);
    // Numerators of the real and imaginary parts, then the two divisions.
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy, fmf);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy, fmf);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d, fmf);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d, fmf);
    // Pack the real (rr) and imaginary (ri) results into the result struct.
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ra, rr, 0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, r1, ri, 1);
    rewriter.replaceOp(divc, r0.getResult());
    return mlir::success();
  }
};
3973
3974/// Inlined complex negation
3975struct NegcOpConversion : public fir::FIROpConversion<fir::NegcOp> {
3976 using FIROpConversion::FIROpConversion;
3977
3978 llvm::LogicalResult
3979 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
3980 mlir::ConversionPatternRewriter &rewriter) const override {
3981 // given: -(x + iy)
3982 // result: -x - iy
3983 auto eleTy = convertType(getComplexEleTy(neg.getType()));
3984 auto loc = neg.getLoc();
3985 mlir::Value o0 = adaptor.getOperands()[0];
3986 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, o0, 0);
3987 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, o0, 1);
3988 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
3989 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
3990 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, o0, nrp, 0);
3991 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, r, nip, 1);
3992 return mlir::success();
3993 }
3994};
3995
3996struct BoxOffsetOpConversion : public fir::FIROpConversion<fir::BoxOffsetOp> {
3997 using FIROpConversion::FIROpConversion;
3998
3999 llvm::LogicalResult
4000 matchAndRewrite(fir::BoxOffsetOp boxOffset, OpAdaptor adaptor,
4001 mlir::ConversionPatternRewriter &rewriter) const override {
4002
4003 mlir::Type pty = ::getLlvmPtrType(boxOffset.getContext());
4004 mlir::Type boxRefType = fir::unwrapRefType(boxOffset.getBoxRef().getType());
4005
4006 assert((mlir::isa<fir::BaseBoxType>(boxRefType) ||
4007 mlir::isa<fir::BoxCharType>(boxRefType)) &&
4008 "boxRef should be a reference to either fir.box or fir.boxchar");
4009
4010 mlir::Type llvmBoxTy;
4011 int fieldId;
4012 if (auto boxType = mlir::dyn_cast_or_null<fir::BaseBoxType>(boxRefType)) {
4013 llvmBoxTy = lowerTy().convertBoxTypeAsStruct(
4014 mlir::cast<fir::BaseBoxType>(boxType));
4015 fieldId = boxOffset.getField() == fir::BoxFieldAttr::derived_type
4016 ? getTypeDescFieldId(boxType)
4017 : kAddrPosInBox;
4018 } else {
4019 auto boxCharType = mlir::cast<fir::BoxCharType>(boxRefType);
4020 llvmBoxTy = lowerTy().convertType(boxCharType);
4021 fieldId = kAddrPosInBox;
4022 }
4023 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(
4024 boxOffset, pty, llvmBoxTy, adaptor.getBoxRef(),
4025 llvm::ArrayRef<mlir::LLVM::GEPArg>{0, fieldId});
4026 return mlir::success();
4027 }
4028};
4029
/// Conversion pattern for operations that must be dead by codegen. The
/// information in these operations is consumed by other operations during
/// earlier rewrites, so at this point they should not have any remaining
/// uses.
/// These operations are normally dead after the pre-codegen pass.
template <typename FromOp>
struct MustBeDeadConversion : public fir::FIROpConversion<FromOp> {
  explicit MustBeDeadConversion(const fir::LLVMTypeConverter &lowering,
                                const fir::FIRToLLVMPassOptions &options)
      : fir::FIROpConversion<FromOp>(lowering, options) {}
  using OpAdaptor = typename FromOp::Adaptor;

  llvm::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    // A remaining use means earlier rewrites failed to absorb this op;
    // refuse the match so the conversion reports the offending op instead
    // of silently dropping it.
    if (!op->getUses().empty())
      return rewriter.notifyMatchFailure(op, "op must be dead");
    rewriter.eraseOp(op);
    return mlir::success();
  }
};
4050
/// `fir.shape` must have no remaining uses by codegen; erase it.
struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};
4054
/// `fir.shape_shift` must have no remaining uses by codegen; erase it.
struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};
4058
/// `fir.shift` must have no remaining uses by codegen; erase it.
struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};
4062
/// `fir.slice` must have no remaining uses by codegen; erase it.
struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};
4066
4067} // namespace
4068
4069namespace {
4070class RenameMSVCLibmCallees
4071 : public mlir::OpRewritePattern<mlir::LLVM::CallOp> {
4072public:
4073 using OpRewritePattern::OpRewritePattern;
4074
4075 llvm::LogicalResult
4076 matchAndRewrite(mlir::LLVM::CallOp op,
4077 mlir::PatternRewriter &rewriter) const override {
4078 rewriter.startOpModification(op);
4079 auto callee = op.getCallee();
4080 if (callee)
4081 if (*callee == "hypotf")
4082 op.setCalleeAttr(mlir::SymbolRefAttr::get(op.getContext(), "_hypotf"));
4083
4084 rewriter.finalizeOpModification(op);
4085 return mlir::success();
4086 }
4087};
4088
4089class RenameMSVCLibmFuncs
4090 : public mlir::OpRewritePattern<mlir::LLVM::LLVMFuncOp> {
4091public:
4092 using OpRewritePattern::OpRewritePattern;
4093
4094 llvm::LogicalResult
4095 matchAndRewrite(mlir::LLVM::LLVMFuncOp op,
4096 mlir::PatternRewriter &rewriter) const override {
4097 rewriter.startOpModification(op);
4098 if (op.getSymName() == "hypotf")
4099 op.setSymNameAttr(rewriter.getStringAttr("_hypotf"));
4100 rewriter.finalizeOpModification(op);
4101 return mlir::success();
4102 }
4103};
4104} // namespace
4105
4106namespace {
/// Convert FIR dialect to LLVM dialect
///
/// This pass lowers all FIR dialect operations to LLVM IR dialect. An
/// MLIR pass is used to lower residual Std dialect to LLVM IR dialect.
class FIRToLLVMLowering
    : public fir::impl::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
public:
  FIRToLLVMLowering() = default;
  FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {}
  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto mod = getModule();
    // Pass-option overrides take precedence over the attributes the module
    // already carries.
    if (!forcedTargetTriple.empty())
      fir::setTargetTriple(mod, forcedTargetTriple);

    if (!forcedDataLayout.empty()) {
      llvm::DataLayout dl(forcedDataLayout);
      fir::support::setMLIRDataLayout(mod, dl);
    }

    if (!forcedTargetCPU.empty())
      fir::setTargetCPU(mod, forcedTargetCPU);

    if (!forcedTuneCPU.empty())
      fir::setTuneCPU(mod, forcedTuneCPU);

    if (!forcedTargetFeatures.empty())
      fir::setTargetFeatures(mod, forcedTargetFeatures);

    if (typeDescriptorsRenamedForAssembly)
      options.typeDescriptorsRenamedForAssembly =
          typeDescriptorsRenamedForAssembly;

    // Run dynamic pass pipeline for converting Math dialect
    // operations into other dialects (llvm, func, etc.).
    // Some conversions of Math operations cannot be done
    // by just using conversion patterns. This is true for
    // conversions that affect the ModuleOp, e.g. create new
    // function operations in it. We have to run such conversions
    // as passes here.
    mlir::OpPassManager mathConvertionPM("builtin.module");

    bool isAMDGCN = fir::getTargetTriple(mod).isAMDGCN();
    // If compiling for AMD target some math operations must be lowered to AMD
    // GPU library calls, the rest can be converted to LLVM intrinsics, which
    // is handled in the mathToLLVM conversion. The lowering to libm calls is
    // not needed since all math operations are handled this way.
    if (isAMDGCN)
      mathConvertionPM.addPass(mlir::createConvertMathToROCDL());

    // Convert math::FPowI operations to inline implementation
    // only if the exponent's width is greater than 32, otherwise,
    // it will be lowered to LLVM intrinsic operation by a later conversion.
    mlir::ConvertMathToFuncsOptions mathToFuncsOptions{};
    mathToFuncsOptions.minWidthOfFPowIExponent = 33;
    mathConvertionPM.addPass(
        mlir::createConvertMathToFuncs(mathToFuncsOptions));

    // Map the frontend complex-range kind onto the ComplexToStandard
    // lowering flag; other kinds fall through and keep the pass default.
    mlir::ConvertComplexToStandardPassOptions complexToStandardOptions{};
    if (options.ComplexRange ==
        Fortran::frontend::CodeGenOptions::ComplexRangeKind::CX_Basic) {
      complexToStandardOptions.complexRange =
          mlir::complex::ComplexRangeFlags::basic;
    } else if (options.ComplexRange == Fortran::frontend::CodeGenOptions::
                                           ComplexRangeKind::CX_Improved) {
      complexToStandardOptions.complexRange =
          mlir::complex::ComplexRangeFlags::improved;
    }
    mathConvertionPM.addPass(
        mlir::createConvertComplexToStandardPass(complexToStandardOptions));

    // Convert Math dialect operations into LLVM dialect operations.
    // There is no way to prefer MathToLLVM patterns over MathToLibm
    // patterns (applied below), so we have to run MathToLLVM conversion here.
    mathConvertionPM.addNestedPass<mlir::func::FuncOp>(
        mlir::createConvertMathToLLVMPass());
    if (mlir::failed(runPipeline(mathConvertionPM, mod)))
      return signalPassFailure();

    // The type converter requires a data layout; fail loudly if none can be
    // derived for this module.
    std::optional<mlir::DataLayout> dl =
        fir::support::getOrSetMLIRDataLayout(mod, /*allowDefaultLayout=*/true);
    if (!dl) {
      mlir::emitError(mod.getLoc(),
                      "module operation must carry a data layout attribute "
                      "to generate llvm IR from FIR");
      signalPassFailure();
      return;
    }

    auto *context = getModule().getContext();
    fir::LLVMTypeConverter typeConverter{getModule(),
                                         options.applyTBAA || applyTBAA,
                                         options.forceUnifiedTBAATree, *dl};
    // Collect the conversion patterns of every dialect that may still be
    // present at this point, all driven by the same type converter.
    mlir::RewritePatternSet pattern(context);
    fir::populateFIRToLLVMConversionPatterns(typeConverter, pattern, options);
    mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithToLLVMConversionPatterns(typeConverter, pattern);
    mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
                                                          pattern);
    mlir::cf::populateAssertToLLVMConversionPattern(typeConverter, pattern);
    // Math operations that have not been converted yet must be converted
    // to Libm.
    if (!isAMDGCN)
      mlir::populateMathToLibmConversionPatterns(pattern);
    mlir::populateComplexToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateVectorToLLVMConversionPatterns(typeConverter, pattern);

    // Flang specific overloads for OpenMP operations, to allow for special
    // handling of things like Box types.
    fir::populateOpenMPFIRToLLVMConversionPatterns(typeConverter, pattern);

    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    // The OpenMP dialect is legal for Operations without regions, for those
    // which contains regions it is legal if the region contains only the
    // LLVM dialect. Add OpenMP dialect as a legal dialect for conversion and
    // legalize conversion of OpenMP operations without regions.
    mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter);
    target.addLegalDialect<mlir::omp::OpenMPDialect>();
    target.addLegalDialect<mlir::acc::OpenACCDialect>();
    target.addLegalDialect<mlir::gpu::GPUDialect>();

    // required NOPs for applying a full conversion
    target.addLegalOp<mlir::ModuleOp>();

    // If we're on Windows, we might need to rename some libm calls.
    bool isMSVC = fir::getTargetTriple(mod).isOSMSVCRT();
    if (isMSVC) {
      pattern.insert<RenameMSVCLibmCallees, RenameMSVCLibmFuncs>(context);

      // Mark `hypotf` calls and definitions illegal so the renaming
      // patterns above are guaranteed to run on them.
      target.addDynamicallyLegalOp<mlir::LLVM::CallOp>(
          [](mlir::LLVM::CallOp op) {
            auto callee = op.getCallee();
            if (!callee)
              return true;
            return *callee != "hypotf";
          });
      target.addDynamicallyLegalOp<mlir::LLVM::LLVMFuncOp>(
          [](mlir::LLVM::LLVMFuncOp op) {
            return op.getSymName() != "hypotf";
          });
    }

    // apply the patterns
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }

    // Run pass to add comdats to functions that have weak linkage on relevant
    // platforms
    if (fir::getTargetTriple(mod).supportsCOMDAT()) {
      mlir::OpPassManager comdatPM("builtin.module");
      comdatPM.addPass(mlir::LLVM::createLLVMAddComdats());
      if (mlir::failed(runPipeline(comdatPM, mod)))
        return signalPassFailure();
    }
  }

private:
  fir::FIRToLLVMPassOptions options;
};
4271
4272/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
4273struct LLVMIRLoweringPass
4274 : public mlir::PassWrapper<LLVMIRLoweringPass,
4275 mlir::OperationPass<mlir::ModuleOp>> {
4276 MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)
4277
4278 LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p)
4279 : output{output}, printer{p} {}
4280
4281 mlir::ModuleOp getModule() { return getOperation(); }
4282
4283 void runOnOperation() override final {
4284 auto *ctx = getModule().getContext();
4285 auto optName = getModule().getName();
4286 llvm::LLVMContext llvmCtx;
4287 if (auto llvmModule = mlir::translateModuleToLLVMIR(
4288 getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
4289 printer(*llvmModule, output);
4290 return;
4291 }
4292
4293 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
4294 signalPassFailure();
4295 }
4296
4297private:
4298 llvm::raw_ostream &output;
4299 fir::LLVMIRLoweringPrinter printer;
4300};
4301
4302} // namespace
4303
/// Create an instance of the FIR-to-LLVM lowering pass with default options.
std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}
4307
/// Create an instance of the FIR-to-LLVM lowering pass configured with
/// \p options.
std::unique_ptr<mlir::Pass>
fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) {
  return std::make_unique<FIRToLLVMLowering>(options);
}
4312
/// Create the pass that translates the LLVM dialect module to LLVM IR and
/// emits it to \p output via \p printer.
std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}
4318
/// Register all FIR-to-LLVM-dialect conversion patterns into \p patterns,
/// parameterized by the shared type converter and pass options.
void fir::populateFIRToLLVMConversionPatterns(
    const fir::LLVMTypeConverter &converter, mlir::RewritePatternSet &patterns,
    fir::FIRToLLVMPassOptions &options) {
  patterns.insert<
      AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,
      AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion,
      BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion,
      BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion,
      BoxOffsetOpConversion, BoxProcHostOpConversion, BoxRankOpConversion,
      BoxTypeCodeOpConversion, BoxTypeDescOpConversion, CallOpConversion,
      CmpcOpConversion, VolatileCastOpConversion, ConvertOpConversion,
      CoordinateOpConversion, CopyOpConversion, DTEntryOpConversion,
      DeclareOpConversion,
      DoConcurrentSpecifierOpConversion<fir::LocalitySpecifierOp>,
      DoConcurrentSpecifierOpConversion<fir::DeclareReductionOp>,
      DivcOpConversion, EmboxOpConversion, EmboxCharOpConversion,
      EmboxProcOpConversion, ExtractValueOpConversion, FieldIndexOpConversion,
      FirEndOpConversion, FreeMemOpConversion, GlobalLenOpConversion,
      GlobalOpConversion, InsertOnRangeOpConversion, IsPresentOpConversion,
      LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion,
      NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion,
      SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
      ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
      SliceOpConversion, StoreOpConversion, StringLitOpConversion,
      SubcOpConversion, TypeDescOpConversion, TypeInfoOpConversion,
      UnboxCharOpConversion, UnboxProcOpConversion, UndefOpConversion,
      UnreachableOpConversion, XArrayCoorOpConversion, XEmboxOpConversion,
      XReboxOpConversion, ZeroOpConversion>(converter, options);

  // Patterns that are populated without a type converter do not trigger
  // target materializations for the operands of the root op.
  patterns.insert<HasValueOpConversion, InsertValueOpConversion>(
      patterns.getContext());
}
4353

source code of flang/lib/Optimizer/CodeGen/CodeGen.cpp