1//===- MapInfoFinalization.cpp -----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9//===----------------------------------------------------------------------===//
10/// \file
11/// An OpenMP dialect related pass for FIR/HLFIR which performs some
12/// pre-processing of MapInfoOp's after the module has been lowered to
13/// finalize them.
14///
15/// For example, it expands MapInfoOp's containing descriptor related
16/// types (fir::BoxType's) into multiple MapInfoOp's containing the parent
17/// descriptor and pointer member components for individual mapping,
18/// treating the descriptor type as a record type for later lowering in the
19/// OpenMP dialect.
20///
21/// The pass also adds MapInfoOp's that are members of a parent object but are
22/// not directly used in the body of a target region to its BlockArgument list
23/// to maintain consistency across all MapInfoOp's tied to a region directly or
24/// indirectly via a parent object.
25//===----------------------------------------------------------------------===//
26
27#include "flang/Optimizer/Builder/DirectivesCommon.h"
28#include "flang/Optimizer/Builder/FIRBuilder.h"
29#include "flang/Optimizer/Builder/HLFIRTools.h"
30#include "flang/Optimizer/Dialect/FIRType.h"
31#include "flang/Optimizer/Dialect/Support/KindMapping.h"
32#include "flang/Optimizer/HLFIR/HLFIROps.h"
33#include "flang/Optimizer/OpenMP/Passes.h"
34#include "mlir/Analysis/SliceAnalysis.h"
35#include "mlir/Dialect/Func/IR/FuncOps.h"
36#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
37#include "mlir/IR/BuiltinDialect.h"
38#include "mlir/IR/BuiltinOps.h"
39#include "mlir/IR/Operation.h"
40#include "mlir/IR/SymbolTable.h"
41#include "mlir/Pass/Pass.h"
42#include "mlir/Support/LLVM.h"
43#include "llvm/ADT/SmallPtrSet.h"
44#include "llvm/Frontend/OpenMP/OMPConstants.h"
45#include <algorithm>
46#include <cstddef>
47#include <iterator>
48#include <numeric>
49
50#define DEBUG_TYPE "omp-map-info-finalization"
51
52namespace flangomp {
53#define GEN_PASS_DEF_MAPINFOFINALIZATIONPASS
54#include "flang/Optimizer/OpenMP/Passes.h.inc"
55} // namespace flangomp
56
57namespace {
58class MapInfoFinalizationPass
59 : public flangomp::impl::MapInfoFinalizationPassBase<
60 MapInfoFinalizationPass> {
  /// Helper class tracking a members parent and its
  /// placement in the parents member list
  struct ParentAndPlacement {
    mlir::omp::MapInfoOp parent; // MapInfoOp that lists the member
    size_t index;                // member's position in parent's member list
  };

  /// Tracks any intermediate function/subroutine local allocations we
  /// generate for the descriptors of box type dummy arguments, so that
  /// we can retrieve it for subsequent reuses within the functions
  /// scope.
  ///
  /// descriptor defining op
  /// | corresponding local alloca
  /// | |
  std::map<mlir::Operation *, mlir::Value> localBoxAllocas;
77
78 /// getMemberUserList gathers all users of a particular MapInfoOp that are
79 /// other MapInfoOp's and places them into the mapMemberUsers list, which
80 /// records the map that the current argument MapInfoOp "op" is part of
81 /// alongside the placement of "op" in the recorded users members list. The
82 /// intent of the generated list is to find all MapInfoOp's that may be
83 /// considered parents of the passed in "op" and in which it shows up in the
84 /// member list, alongside collecting the placement information of "op" in its
85 /// parents member list.
86 void
87 getMemberUserList(mlir::omp::MapInfoOp op,
88 llvm::SmallVectorImpl<ParentAndPlacement> &mapMemberUsers) {
89 for (auto *user : op->getUsers())
90 if (auto map = mlir::dyn_cast_if_present<mlir::omp::MapInfoOp>(user))
91 for (auto [i, mapMember] : llvm::enumerate(map.getMembers()))
92 if (mapMember.getDefiningOp() == op)
93 mapMemberUsers.push_back({map, i});
94 }
95
96 void getAsIntegers(llvm::ArrayRef<mlir::Attribute> values,
97 llvm::SmallVectorImpl<int64_t> &ints) {
98 ints.reserve(values.size());
99 llvm::transform(values, std::back_inserter(ints),
100 [](mlir::Attribute value) {
101 return mlir::cast<mlir::IntegerAttr>(value).getInt();
102 });
103 }
104
105 /// This function will expand a MapInfoOp's member indices back into a vector
106 /// so that they can be trivially modified as unfortunately the attribute type
107 /// that's used does not have modifiable fields at the moment (generally
108 /// awkward to work with)
109 void getMemberIndicesAsVectors(
110 mlir::omp::MapInfoOp mapInfo,
111 llvm::SmallVectorImpl<llvm::SmallVector<int64_t>> &indices) {
112 indices.reserve(mapInfo.getMembersIndexAttr().getValue().size());
113 llvm::transform(mapInfo.getMembersIndexAttr().getValue(),
114 std::back_inserter(indices), [this](mlir::Attribute value) {
115 auto memberIndex = mlir::cast<mlir::ArrayAttr>(value);
116 llvm::SmallVector<int64_t> indexes;
117 getAsIntegers(memberIndex.getValue(), indexes);
118 return indexes;
119 });
120 }
121
  /// When provided a MapInfoOp containing a descriptor type that
  /// we must expand into multiple maps this function will extract
  /// the value from it and return it, in certain cases we must
  /// generate a new allocation to store into so that the
  /// fir::BoxOffsetOp we utilise to access the descriptor datas
  /// base address can be utilised.
  mlir::Value getDescriptorFromBoxMap(mlir::omp::MapInfoOp boxMap,
                                      fir::FirOpBuilder &builder) {
    mlir::Value descriptor = boxMap.getVarPtr();
    // If the mapped type is not itself descriptor-backed, the varPtr may
    // have been produced by a fir.box_addr; recover the original box value
    // in that case so we operate on the descriptor rather than the raw
    // data address.
    if (!fir::isTypeWithDescriptor(boxMap.getVarType()))
      if (auto addrOp = mlir::dyn_cast_if_present<fir::BoxAddrOp>(
              boxMap.getVarPtr().getDefiningOp()))
        descriptor = addrOp.getVal();

    // Values that are neither bare boxes nor optional arguments are already
    // in a usable (reference) form; return them untouched.
    if (!mlir::isa<fir::BaseBoxType>(descriptor.getType()) &&
        !fir::factory::isOptionalArgument(descriptor.getDefiningOp()))
      return descriptor;

    // Reuse a previously generated local alloca for this descriptor if one
    // exists (operator[] default-inserts a null Value on first lookup, so a
    // falsy slot means "not generated yet").
    mlir::Value &slot = localBoxAllocas[descriptor.getDefiningOp()];
    if (slot) {
      return slot;
    }

    // The fir::BoxOffsetOp only works with !fir.ref<!fir.box<...>> types, as
    // allowing it to access non-reference box operations can cause some
    // problematic SSA IR. However, in the case of assumed shape's the type
    // is not a !fir.ref, in these cases to retrieve the appropriate
    // !fir.ref<!fir.box<...>> to access the data we need to map we must
    // perform an alloca and then store to it and retrieve the data from the new
    // alloca.
    mlir::OpBuilder::InsertPoint insPt = builder.saveInsertionPoint();
    mlir::Block *allocaBlock = builder.getAllocaBlock();
    mlir::Location loc = boxMap->getLoc();
    assert(allocaBlock && "No alloca block found for this top level op");
    builder.setInsertionPointToStart(allocaBlock);

    // If the descriptor is already behind a reference, allocate storage for
    // the pointee box type rather than another level of reference.
    mlir::Type allocaType = descriptor.getType();
    if (fir::isBoxAddress(allocaType))
      allocaType = fir::unwrapRefType(allocaType);
    auto alloca = builder.create<fir::AllocaOp>(loc, allocaType);
    builder.restoreInsertionPoint(insPt);
    // We should only emit a store if the passed in data is present, it is
    // possible a user passes in no argument to an optional parameter, in which
    // case we cannot store or we'll segfault on the emitted memcpy.
    auto isPresent =
        builder.create<fir::IsPresentOp>(loc, builder.getI1Type(), descriptor);
    builder.genIfOp(loc, {}, isPresent, false)
        .genThen([&]() {
          descriptor = builder.loadIfRef(loc, descriptor);
          builder.create<fir::StoreOp>(loc, descriptor, alloca);
        })
        .end();
    // Cache the new alloca for reuse within this function's scope.
    return slot = alloca;
  }
176
  /// Function that generates a FIR operation accessing the descriptor's
  /// base address (BoxOffsetOp) and a MapInfoOp for it. The most
  /// important thing to note is that we normally move the bounds from
  /// the descriptor map onto the base address map.
  mlir::omp::MapInfoOp genBaseAddrMap(mlir::Value descriptor,
                                      mlir::OperandRange bounds,
                                      int64_t mapType,
                                      fir::FirOpBuilder &builder) {
    mlir::Location loc = descriptor.getLoc();
    // Address of the descriptor's base_addr field; used as the varPtrPtr of
    // the generated member map.
    mlir::Value baseAddrAddr = builder.create<fir::BoxOffsetOp>(
        loc, descriptor, fir::BoxFieldAttr::base_addr);

    mlir::Type underlyingVarType =
        llvm::cast<mlir::omp::PointerLikeType>(
            fir::unwrapRefType(baseAddrAddr.getType()))
            .getElementType();
    // For sequences with dynamic extents record only the element type; the
    // runtime extents are conveyed through `bounds`.
    if (auto seqType = llvm::dyn_cast<fir::SequenceType>(underlyingVarType))
      if (seqType.hasDynamicExtents())
        underlyingVarType = seqType.getEleTy();

    // Member of the descriptor pointing at the allocated data
    return builder.create<mlir::omp::MapInfoOp>(
        loc, baseAddrAddr.getType(), descriptor,
        mlir::TypeAttr::get(underlyingVarType),
        builder.getIntegerAttr(builder.getIntegerType(64, false), mapType),
        builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
            mlir::omp::VariableCaptureKind::ByRef),
        baseAddrAddr, /*members=*/mlir::SmallVector<mlir::Value>{},
        /*membersIndex=*/mlir::ArrayAttr{}, bounds,
        /*mapperId*/ mlir::FlatSymbolRefAttr(),
        /*name=*/builder.getStringAttr(""),
        /*partial_map=*/builder.getBoolAttr(false));
  }
210
211 /// This function adjusts the member indices vector to include a new
212 /// base address member. We take the position of the descriptor in
213 /// the member indices list, which is the index data that the base
214 /// addresses index will be based off of, as the base address is
215 /// a member of the descriptor. We must also alter other members
216 /// that are members of this descriptor to account for the addition
217 /// of the base address index.
218 void adjustMemberIndices(
219 llvm::SmallVectorImpl<llvm::SmallVector<int64_t>> &memberIndices,
220 size_t memberIndex) {
221 llvm::SmallVector<int64_t> baseAddrIndex = memberIndices[memberIndex];
222
223 // If we find another member that is "derived/a member of" the descriptor
224 // that is not the descriptor itself, we must insert a 0 for the new base
225 // address we have just added for the descriptor into the list at the
226 // appropriate position to maintain correctness of the positional/index data
227 // for that member.
228 for (llvm::SmallVector<int64_t> &member : memberIndices)
229 if (member.size() > baseAddrIndex.size() &&
230 std::equal(baseAddrIndex.begin(), baseAddrIndex.end(),
231 member.begin()))
232 member.insert(std::next(member.begin(), baseAddrIndex.size()), 0);
233
234 // Add the base address index to the main base address member data
235 baseAddrIndex.push_back(0);
236
237 // Insert our newly created baseAddrIndex into the larger list of indices at
238 // the correct location.
239 memberIndices.insert(std::next(memberIndices.begin(), memberIndex + 1),
240 baseAddrIndex);
241 }
242
243 /// Adjusts the descriptor's map type. The main alteration that is done
244 /// currently is transforming the map type to `OMP_MAP_TO` where possible.
245 /// This is because we will always need to map the descriptor to device
246 /// (or at the very least it seems to be the case currently with the
247 /// current lowered kernel IR), as without the appropriate descriptor
248 /// information on the device there is a risk of the kernel IR
249 /// requesting for various data that will not have been copied to
250 /// perform things like indexing. This can cause segfaults and
251 /// memory access errors. However, we do not need this data mapped
252 /// back to the host from the device, as per the OpenMP spec we cannot alter
253 /// the data via resizing or deletion on the device. Discarding any
254 /// descriptor alterations via no map back is reasonable (and required
255 /// for certain segments of descriptor data like the type descriptor that are
256 /// global constants). This alteration is only inapplicable to `target exit`
257 /// and `target update` currently, and that's due to `target exit` not
258 /// allowing `to` mappings, and `target update` not allowing both `to` and
259 /// `from` simultaneously. We currently try to maintain the `implicit` flag
260 /// where necessary, although it does not seem strictly required.
261 unsigned long getDescriptorMapType(unsigned long mapTypeFlag,
262 mlir::Operation *target) {
263 using mapFlags = llvm::omp::OpenMPOffloadMappingFlags;
264 if (llvm::isa_and_nonnull<mlir::omp::TargetExitDataOp,
265 mlir::omp::TargetUpdateOp>(target))
266 return mapTypeFlag;
267
268 mapFlags flags = mapFlags::OMP_MAP_TO |
269 (mapFlags(mapTypeFlag) &
270 (mapFlags::OMP_MAP_IMPLICIT | mapFlags::OMP_MAP_CLOSE |
271 mapFlags::OMP_MAP_ALWAYS));
272 return llvm::to_underlying(flags);
273 }
274
275 /// Check if the mapOp is present in the HasDeviceAddr clause on
276 /// the userOp. Only applies to TargetOp.
277 bool isHasDeviceAddr(mlir::omp::MapInfoOp mapOp, mlir::Operation *userOp) {
278 assert(userOp && "Expecting non-null argument");
279 if (auto targetOp = llvm::dyn_cast<mlir::omp::TargetOp>(userOp)) {
280 for (mlir::Value hda : targetOp.getHasDeviceAddrVars()) {
281 if (hda.getDefiningOp() == mapOp)
282 return true;
283 }
284 }
285 return false;
286 }
287
  /// Expands a MapInfoOp whose variable is a boxchar into a parent map plus
  /// a single member map for the character data (reached through the
  /// boxchar's base_addr field). The original op is replaced and erased; the
  /// new parent MapInfoOp is returned. Ops that already carry members are
  /// returned unchanged.
  mlir::omp::MapInfoOp genBoxcharMemberMap(mlir::omp::MapInfoOp op,
                                           fir::FirOpBuilder &builder) {
    if (!op.getMembers().empty())
      return op;
    mlir::Location loc = op.getVarPtr().getLoc();
    mlir::Value boxChar = op.getVarPtr();

    // If the varPtr is a reference, load the boxchar value so its type can
    // be inspected below.
    if (mlir::isa<fir::ReferenceType>(op.getVarPtr().getType()))
      boxChar = builder.create<fir::LoadOp>(loc, op.getVarPtr());

    // NOTE(review): dyn_cast yields a null type if boxChar is not a
    // !fir.boxchar, and getEleTy() is called on it below — presumably
    // callers only invoke this for boxchar maps; confirm.
    fir::BoxCharType boxCharType =
        mlir::dyn_cast<fir::BoxCharType>(boxChar.getType());
    mlir::Value boxAddr = builder.create<fir::BoxOffsetOp>(
        loc, op.getVarPtr(), fir::BoxFieldAttr::base_addr);

    // The character payload member is always mapped TO | IMPLICIT.
    uint64_t mapTypeToImplicit = static_cast<
        std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
        llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO |
        llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);

    // Exactly one member at index {0}: the base address of the boxchar.
    mlir::ArrayAttr newMembersAttr;
    llvm::SmallVector<llvm::SmallVector<int64_t>> memberIdx = {{0}};
    newMembersAttr = builder.create2DI64ArrayAttr(memberIdx);

    mlir::Value varPtr = op.getVarPtr();
    // Member map for the character data; the original op's bounds move onto
    // this member.
    mlir::omp::MapInfoOp memberMapInfoOp = builder.create<mlir::omp::MapInfoOp>(
        op.getLoc(), varPtr.getType(), varPtr,
        mlir::TypeAttr::get(boxCharType.getEleTy()),
        builder.getIntegerAttr(builder.getIntegerType(64, /*isSigned=*/false),
                               mapTypeToImplicit),
        builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
            mlir::omp::VariableCaptureKind::ByRef),
        /*varPtrPtr=*/boxAddr,
        /*members=*/llvm::SmallVector<mlir::Value>{},
        /*member_index=*/mlir::ArrayAttr{},
        /*bounds=*/op.getBounds(),
        /*mapperId=*/mlir::FlatSymbolRefAttr(), /*name=*/op.getNameAttr(),
        builder.getBoolAttr(false));

    // Parent map replacing the original: it owns the member above and
    // carries no bounds of its own.
    mlir::omp::MapInfoOp newMapInfoOp = builder.create<mlir::omp::MapInfoOp>(
        op.getLoc(), op.getResult().getType(), varPtr,
        mlir::TypeAttr::get(
            llvm::cast<mlir::omp::PointerLikeType>(varPtr.getType())
                .getElementType()),
        op.getMapTypeAttr(), op.getMapCaptureTypeAttr(),
        /*varPtrPtr=*/mlir::Value{},
        /*members=*/llvm::SmallVector<mlir::Value>{memberMapInfoOp},
        /*member_index=*/newMembersAttr,
        /*bounds=*/llvm::SmallVector<mlir::Value>{},
        /*mapperId=*/mlir::FlatSymbolRefAttr(), op.getNameAttr(),
        /*partial_map=*/builder.getBoolAttr(false));
    op.replaceAllUsesWith(newMapInfoOp.getResult());
    op->erase();
    return newMapInfoOp;
  }
343
  /// Expands a MapInfoOp over a descriptor-backed type into a map of the
  /// descriptor itself plus (in most cases) a member map for the
  /// descriptor's base address pointing at the allocated data. The original
  /// op is replaced and erased; the new descriptor-parent MapInfoOp is
  /// returned.
  mlir::omp::MapInfoOp genDescriptorMemberMaps(mlir::omp::MapInfoOp op,
                                               fir::FirOpBuilder &builder,
                                               mlir::Operation *target) {
    llvm::SmallVector<ParentAndPlacement> mapMemberUsers;
    getMemberUserList(op, mapMemberUsers);

    // TODO: map the addendum segment of the descriptor, similarly to the
    // base address/data pointer member.
    mlir::Value descriptor = getDescriptorFromBoxMap(op, builder);

    mlir::ArrayAttr newMembersAttr;
    mlir::SmallVector<mlir::Value> newMembers;
    llvm::SmallVector<llvm::SmallVector<int64_t>> memberIndices;
    bool IsHasDeviceAddr = isHasDeviceAddr(op, target);

    // Gather the member indices that may need adjusting: the parent's if
    // this op is itself a member of another map, otherwise this op's own.
    if (!mapMemberUsers.empty() || !op.getMembers().empty())
      getMemberIndicesAsVectors(
          !mapMemberUsers.empty() ? mapMemberUsers[0].parent : op,
          memberIndices);

    // If the operation that we are expanding with a descriptor has a user
    // (parent), then we have to expand the parent's member indices to reflect
    // the adjusted member indices for the base address insertion. However, if
    // it does not then we are expanding a MapInfoOp without any pre-existing
    // member information to now have one new member for the base address, or
    // we are expanding a parent that is a descriptor and we have to adjust
    // all of its members to reflect the insertion of the base address.
    //
    // If we're expanding a top-level descriptor for a map operation that
    // resulted from "has_device_addr" clause, then we want the base pointer
    // from the descriptor to be used verbatim, i.e. without additional
    // remapping. To avoid this remapping, simply don't generate any map
    // information for the descriptor members.
    if (!mapMemberUsers.empty()) {
      // Currently, there should only be one user per map when this pass
      // is executed. Either a parent map, holding the current map in its
      // member list, or a target operation that holds a map clause. This
      // may change in the future if we aim to refactor the MLIR for map
      // clauses to allow sharing of duplicate maps across target
      // operations.
      assert(mapMemberUsers.size() == 1 &&
             "OMPMapInfoFinalization currently only supports single users of a "
             "MapInfoOp");
      auto baseAddr =
          genBaseAddrMap(descriptor, op.getBounds(), op.getMapType(), builder);
      ParentAndPlacement mapUser = mapMemberUsers[0];
      adjustMemberIndices(memberIndices, mapUser.index);
      // Rebuild the parent's member list with the new base address map
      // placed immediately after this op's entry.
      llvm::SmallVector<mlir::Value> newMemberOps;
      for (auto v : mapUser.parent.getMembers()) {
        newMemberOps.push_back(v);
        if (v == op)
          newMemberOps.push_back(baseAddr);
      }
      mapUser.parent.getMembersMutable().assign(newMemberOps);
      mapUser.parent.setMembersIndexAttr(
          builder.create2DI64ArrayAttr(memberIndices));
    } else if (!IsHasDeviceAddr) {
      auto baseAddr =
          genBaseAddrMap(descriptor, op.getBounds(), op.getMapType(), builder);
      // The base address becomes the descriptor's first member (index {0});
      // any pre-existing members are re-parented one level deeper under the
      // descriptor by prefixing a 0 to each of their index paths.
      newMembers.push_back(baseAddr);
      if (!op.getMembers().empty()) {
        for (auto &indices : memberIndices)
          indices.insert(indices.begin(), 0);
        memberIndices.insert(memberIndices.begin(), {0});
        newMembersAttr = builder.create2DI64ArrayAttr(memberIndices);
        newMembers.append(op.getMembers().begin(), op.getMembers().end());
      } else {
        llvm::SmallVector<llvm::SmallVector<int64_t>> memberIdx = {{0}};
        newMembersAttr = builder.create2DI64ArrayAttr(memberIdx);
      }
    }

    // Descriptors for objects listed on the `has_device_addr` will always
    // be copied. This is because the descriptor can be rematerialized by the
    // compiler, and so the address of the descriptor for a given object at
    // one place in the code may differ from that address in another place.
    // The contents of the descriptor (the base address in particular) will
    // remain unchanged though.
    uint64_t mapType = op.getMapType();
    if (IsHasDeviceAddr) {
      mapType |= llvm::to_underlying(
          llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS);
    }

    // Replacement map for the descriptor itself, carrying the (possibly
    // empty) new member list; bounds now live on the base address member.
    mlir::omp::MapInfoOp newDescParentMapOp =
        builder.create<mlir::omp::MapInfoOp>(
            op->getLoc(), op.getResult().getType(), descriptor,
            mlir::TypeAttr::get(fir::unwrapRefType(descriptor.getType())),
            builder.getIntegerAttr(builder.getIntegerType(64, false),
                                   getDescriptorMapType(mapType, target)),
            op.getMapCaptureTypeAttr(), /*varPtrPtr=*/mlir::Value{}, newMembers,
            newMembersAttr, /*bounds=*/mlir::SmallVector<mlir::Value>{},
            /*mapperId*/ mlir::FlatSymbolRefAttr(), op.getNameAttr(),
            /*partial_map=*/builder.getBoolAttr(false));
    op.replaceAllUsesWith(newDescParentMapOp.getResult());
    op->erase();
    return newDescParentMapOp;
  }
442
  // We add all mapped record members not directly used in the target region
  // to the block arguments in front of their parent and we place them into
  // the map operands list for consistency.
  //
  // These indirect uses (via accesses to their parent) will still be
  // mapped individually in most cases, and a parent mapping doesn't
  // guarantee the parent will be mapped in its totality, partial
  // mapping is common.
  //
  // For example:
  // map(tofrom: x%y)
  //
  // Will generate a mapping for "x" (the parent) and "y" (the member).
  // The parent "x" will not be mapped, but the member "y" will.
  // However, we must have the parent as a BlockArg and MapOperand
  // in these cases, to maintain the correct uses within the region and
  // to help tracking that the member is part of a larger object.
  //
  // In the case of:
  // map(tofrom: x%y, x%z)
  //
  // The parent member becomes more critical, as we perform a partial
  // structure mapping where we link the mapping of the members y
  // and z together via the parent x. We do this at a kernel argument
  // level in LLVM IR and not just MLIR, which is important to maintain
  // similarity to Clang and for the runtime to do the correct thing.
  // However, we still do not map the structure in its totality but
  // rather we generate an un-sized "binding" map entry for it.
  //
  // In the case of:
  // map(tofrom: x, x%y, x%z)
  //
  // We do actually map the entirety of "x", so the explicit mapping of
  // x%y, x%z becomes unnecessary. It is redundant to write this from a
  // Fortran OpenMP perspective (although it is legal), as even if the
  // members were allocatables or pointers, we are mandated by the
  // specification to map these (and any recursive components) in their
  // entirety, which is different to the C++ equivalent, which requires
  // explicit mapping of these segments.
  void addImplicitMembersToTarget(mlir::omp::MapInfoOp op,
                                  fir::FirOpBuilder &builder,
                                  mlir::Operation *target) {
    auto mapClauseOwner =
        llvm::dyn_cast_if_present<mlir::omp::MapClauseOwningOpInterface>(
            target);
    // TargetDataOp is technically a MapClauseOwningOpInterface, so we
    // do not need to explicitly check for the extra cases here for use_device
    // addr/ptr
    if (!mapClauseOwner)
      return;

    // Appends each member of `op` that is missing from `mutableOpRange` to
    // that operand range, and (when `directiveOp` is non-null) inserts a
    // matching block argument at `blockArgInsertIndex` in its first region.
    // Does nothing when `op` itself is not in the range.
    auto addOperands = [&](mlir::MutableOperandRange &mutableOpRange,
                           mlir::Operation *directiveOp,
                           unsigned blockArgInsertIndex = 0) {
      if (!llvm::is_contained(mutableOpRange.getAsOperandRange(),
                              op.getResult()))
        return;

      // There doesn't appear to be a simple way to convert MutableOperandRange
      // to a vector currently, so we instead use a for_each to populate our
      // vector.
      llvm::SmallVector<mlir::Value> newMapOps;
      newMapOps.reserve(mutableOpRange.size());
      llvm::for_each(
          mutableOpRange.getAsOperandRange(),
          [&newMapOps](mlir::Value oper) { newMapOps.push_back(oper); });

      for (auto mapMember : op.getMembers()) {
        // Skip members that are already present as operands.
        if (llvm::is_contained(mutableOpRange.getAsOperandRange(), mapMember))
          continue;
        newMapOps.push_back(mapMember);
        if (directiveOp) {
          directiveOp->getRegion(0).insertArgument(
              blockArgInsertIndex, mapMember.getType(), mapMember.getLoc());
          blockArgInsertIndex++;
        }
      }

      mutableOpRange.assign(newMapOps);
    };

    // May be null for ops without entry-block arguments (e.g. enter/exit
    // data); the branches below account for that where applicable.
    auto argIface =
        llvm::dyn_cast<mlir::omp::BlockArgOpenMPOpInterface>(target);

    // NOTE(review): this inner `mapClauseOwner` shadows the outer variable
    // of the same name/value — confirm the shadowing is intentional.
    if (auto mapClauseOwner =
            llvm::dyn_cast<mlir::omp::MapClauseOwningOpInterface>(target)) {
      mlir::MutableOperandRange mapMutableOpRange =
          mapClauseOwner.getMapVarsMutable();
      // New block args go after the existing map block args, when they exist.
      unsigned blockArgInsertIndex =
          argIface
              ? argIface.getMapBlockArgsStart() + argIface.numMapBlockArgs()
              : 0;
      addOperands(mapMutableOpRange,
                  llvm::dyn_cast_if_present<mlir::omp::TargetOp>(
                      argIface.getOperation()),
                  blockArgInsertIndex);
    }

    if (auto targetDataOp = llvm::dyn_cast<mlir::omp::TargetDataOp>(target)) {
      mlir::MutableOperandRange useDevAddrMutableOpRange =
          targetDataOp.getUseDeviceAddrVarsMutable();
      addOperands(useDevAddrMutableOpRange, target,
                  argIface.getUseDeviceAddrBlockArgsStart() +
                      argIface.numUseDeviceAddrBlockArgs());

      mlir::MutableOperandRange useDevPtrMutableOpRange =
          targetDataOp.getUseDevicePtrVarsMutable();
      addOperands(useDevPtrMutableOpRange, target,
                  argIface.getUseDevicePtrBlockArgsStart() +
                      argIface.numUseDevicePtrBlockArgs());
    } else if (auto targetOp = llvm::dyn_cast<mlir::omp::TargetOp>(target)) {
      mlir::MutableOperandRange hasDevAddrMutableOpRange =
          targetOp.getHasDeviceAddrVarsMutable();
      addOperands(hasDevAddrMutableOpRange, target,
                  argIface.getHasDeviceAddrBlockArgsStart() +
                      argIface.numHasDeviceAddrBlockArgs());
    }
  }
561
  // We retrieve the first user that is a Target operation, of which
  // there should only be one currently. Every MapInfoOp can be tied to
  // at most one Target operation and at the minimum no operations.
  // This may change in the future with IR cleanups/modifications,
  // in which case this pass will need updating to support cases
  // where a map can have more than one user and more than one of
  // those users can be a Target operation. For now, we simply
  // return the first target operation encountered, which may
  // be on the parent MapInfoOp in the case of a member mapping.
  // In that case, we traverse the MapInfoOp chain until we
  // find the first TargetOp user.
  mlir::Operation *getFirstTargetUser(mlir::omp::MapInfoOp mapOp) {
    for (auto *user : mapOp->getUsers()) {
      if (llvm::isa<mlir::omp::TargetOp, mlir::omp::TargetDataOp,
                    mlir::omp::TargetUpdateOp, mlir::omp::TargetExitDataOp,
                    mlir::omp::TargetEnterDataOp,
                    mlir::omp::DeclareMapperInfoOp>(user))
        return user;

      // NOTE(review): the recursive result is returned unconditionally,
      // even when null — remaining users of `mapOp` are not examined in
      // that case. This relies on the single-target-user invariant stated
      // above; revisit if that invariant is ever relaxed.
      if (auto mapUser = llvm::dyn_cast<mlir::omp::MapInfoOp>(user))
        return getFirstTargetUser(mapUser);
    }

    return nullptr;
  }
587
588 // This pass executes on omp::MapInfoOp's containing descriptor based types
589 // (allocatables, pointers, assumed shape etc.) and expanding them into
590 // multiple omp::MapInfoOp's for each pointer member contained within the
591 // descriptor.
592 //
593 // From the perspective of the MLIR pass manager this runs on the top level
594 // operation (usually function) containing the MapInfoOp because this pass
595 // will mutate siblings of MapInfoOp.
596 void runOnOperation() override {
597 mlir::ModuleOp module = getOperation();
598 if (!module)
599 module = getOperation()->getParentOfType<mlir::ModuleOp>();
600 fir::KindMapping kindMap = fir::getKindMapping(module);
601 fir::FirOpBuilder builder{module, std::move(kindMap)};
602
603 // We wish to maintain some function level scope (currently
604 // just local function scope variables used to load and store box
605 // variables into so we can access their base address, an
606 // quirk of box_offset requires us to have an in memory box, but Fortran
607 // in certain cases does not provide this) whilst not subjecting
608 // ourselves to the possibility of race conditions while this pass
609 // undergoes frequent re-iteration for the near future. So we loop
610 // over function in the module and then map.info inside of those.
611 getOperation()->walk([&](mlir::Operation *func) {
612 if (!mlir::isa<mlir::func::FuncOp, mlir::omp::DeclareMapperOp>(func))
613 return;
614 // clear all local allocations we made for any boxes in any prior
615 // iterations from previous function scopes.
616 localBoxAllocas.clear();
617
618 // First, walk `omp.map.info` ops to see if any of them have varPtrs
619 // with an underlying type of fir.char<k, ?>, i.e a character
620 // with dynamic length. If so, check if they need bounds added.
621 func->walk([&](mlir::omp::MapInfoOp op) {
622 if (!op.getBounds().empty())
623 return;
624
625 mlir::Value varPtr = op.getVarPtr();
626 mlir::Type underlyingVarType = fir::unwrapRefType(varPtr.getType());
627
628 if (!fir::characterWithDynamicLen(underlyingVarType))
629 return;
630
631 fir::factory::AddrAndBoundsInfo info =
632 fir::factory::getDataOperandBaseAddr(
633 builder, varPtr, /*isOptional=*/false, varPtr.getLoc());
634
635 fir::ExtendedValue extendedValue =
636 hlfir::translateToExtendedValue(varPtr.getLoc(), builder,
637 hlfir::Entity{info.addr},
638 /*continguousHint=*/true)
639 .first;
640 builder.setInsertionPoint(op);
641 llvm::SmallVector<mlir::Value> boundsOps =
642 fir::factory::genImplicitBoundsOps<mlir::omp::MapBoundsOp,
643 mlir::omp::MapBoundsType>(
644 builder, info, extendedValue,
645 /*dataExvIsAssumedSize=*/false, varPtr.getLoc());
646
647 op.getBoundsMutable().append(boundsOps);
648 });
649
650 // Next, walk `omp.map.info` ops to see if any record members should be
651 // implicitly mapped.
652 func->walk([&](mlir::omp::MapInfoOp op) {
653 mlir::Type underlyingType =
654 fir::unwrapRefType(op.getVarPtr().getType());
655
656 // TODO Test with and support more complicated cases; like arrays for
657 // records, for example.
658 if (!fir::isRecordWithAllocatableMember(underlyingType))
659 return mlir::WalkResult::advance();
660
661 // TODO For now, only consider `omp.target` ops. Other ops that support
662 // `map` clauses will follow later.
663 mlir::omp::TargetOp target =
664 mlir::dyn_cast_if_present<mlir::omp::TargetOp>(
665 getFirstTargetUser(op));
666
667 if (!target)
668 return mlir::WalkResult::advance();
669
670 auto mapClauseOwner =
671 llvm::dyn_cast<mlir::omp::MapClauseOwningOpInterface>(*target);
672
673 int64_t mapVarIdx = mapClauseOwner.getOperandIndexForMap(op);
674 assert(mapVarIdx >= 0 &&
675 mapVarIdx <
676 static_cast<int64_t>(mapClauseOwner.getMapVars().size()));
677
        // Inspect the forward slice of the target region's corresponding
        // `map` block argument: every operation that (transitively) uses the
        // mapped variable inside the region.
        auto argIface =
            llvm::dyn_cast<mlir::omp::BlockArgOpenMPOpInterface>(*target);
        // TODO How should `map` block arguments that correspond to: `private`,
        // `use_device_addr`, `use_device_ptr`, be handled?
        mlir::BlockArgument opBlockArg = argIface.getMapBlockArgs()[mapVarIdx];
        llvm::SetVector<mlir::Operation *> mapVarForwardSlice;
        mlir::getForwardSlice(opBlockArg, &mapVarForwardSlice);

        // Keep only `hlfir.designate` users: these are the component accesses
        // we can currently attribute to a specific field of the record.
        mapVarForwardSlice.remove_if([&](mlir::Operation *sliceOp) {
          // TODO Support coordinate_of ops.
          //
          // TODO Support call ops by recursively examining the forward slice
          // of the corresponding parameter to the field in the called
          // function.
          return !mlir::isa<hlfir::DesignateOp>(sliceOp);
        });

        auto recordType = mlir::cast<fir::RecordType>(underlyingType);
        // New member maps created for accessed allocatable fields, and the
        // index of each field within the record type (parallel vectors).
        llvm::SmallVector<mlir::Value> newMapOpsForFields;
        llvm::SmallVector<int64_t> fieldIndicies;

        for (auto fieldMemTyPair : recordType.getTypeList()) {
          auto &field = fieldMemTyPair.first;
          auto memTy = fieldMemTyPair.second;

          // Map the field only if it is an allocatable that is actually
          // accessed (designated by component name) inside the target region.
          bool shouldMapField =
              llvm::find_if(mapVarForwardSlice, [&](mlir::Operation *sliceOp) {
                if (!fir::isAllocatableType(memTy))
                  return false;

                auto designateOp = mlir::dyn_cast<hlfir::DesignateOp>(sliceOp);
                if (!designateOp)
                  return false;

                return designateOp.getComponent() &&
                       designateOp.getComponent()->strref() == field;
              }) != mapVarForwardSlice.end();

          // TODO Handle recursive record types. Adapting
          // `createParentSymAndGenIntermediateMaps` to work directly on MLIR
          // entities might be helpful here.

          if (!shouldMapField)
            continue;

          int32_t fieldIdx = recordType.getFieldIndex(field);
          // Skip fields that are already mapped explicitly, i.e. an existing
          // member whose index path is exactly [fieldIdx].
          bool alreadyMapped = [&]() {
            if (op.getMembersIndexAttr())
              for (auto indexList : op.getMembersIndexAttr()) {
                auto indexListAttr = mlir::cast<mlir::ArrayAttr>(indexList);
                if (indexListAttr.size() == 1 &&
                    mlir::cast<mlir::IntegerAttr>(indexListAttr[0]).getInt() ==
                        fieldIdx)
                  return true;
              }

            return false;
          }();

          if (alreadyMapped)
            continue;

          // Compute the field's address (fir.coordinate_of) and its implicit
          // bounds, then materialize a MapInfoOp for the field right before
          // the parent map.
          builder.setInsertionPoint(op);
          fir::IntOrValue idxConst =
              mlir::IntegerAttr::get(builder.getI32Type(), fieldIdx);
          auto fieldCoord = builder.create<fir::CoordinateOp>(
              op.getLoc(), builder.getRefType(memTy), op.getVarPtr(),
              llvm::SmallVector<fir::IntOrValue, 1>{idxConst});
          fir::factory::AddrAndBoundsInfo info =
              fir::factory::getDataOperandBaseAddr(
                  builder, fieldCoord, /*isOptional=*/false, op.getLoc());
          llvm::SmallVector<mlir::Value> bounds =
              fir::factory::genImplicitBoundsOps<mlir::omp::MapBoundsOp,
                                                 mlir::omp::MapBoundsType>(
                  builder, info,
                  hlfir::translateToExtendedValue(op.getLoc(), builder,
                                                  hlfir::Entity{fieldCoord})
                      .first,
                  /*dataExvIsAssumedSize=*/false, op.getLoc());

          // The field map inherits the parent's map-type flags, is captured
          // by reference, and is named "<parent>.<field>.implicit_map" to
          // make its origin visible in the IR.
          mlir::omp::MapInfoOp fieldMapOp =
              builder.create<mlir::omp::MapInfoOp>(
                  op.getLoc(), fieldCoord.getResult().getType(),
                  fieldCoord.getResult(),
                  mlir::TypeAttr::get(
                      fir::unwrapRefType(fieldCoord.getResult().getType())),
                  op.getMapTypeAttr(),
                  builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
                      mlir::omp::VariableCaptureKind::ByRef),
                  /*varPtrPtr=*/mlir::Value{}, /*members=*/mlir::ValueRange{},
                  /*members_index=*/mlir::ArrayAttr{}, bounds,
                  /*mapperId=*/mlir::FlatSymbolRefAttr(),
                  builder.getStringAttr(op.getNameAttr().strref() + "." +
                                        field + ".implicit_map"),
                  /*partial_map=*/builder.getBoolAttr(false));
          newMapOpsForFields.emplace_back(fieldMapOp);
          fieldIndicies.emplace_back(fieldIdx);
        }

        if (newMapOpsForFields.empty())
          return mlir::WalkResult::advance();

        // Attach the new field maps as members of the parent map and rebuild
        // the 2D member-index attribute: existing index paths are copied
        // first, followed by a single-element path for each new field.
        op.getMembersMutable().append(newMapOpsForFields);
        llvm::SmallVector<llvm::SmallVector<int64_t>> newMemberIndices;
        mlir::ArrayAttr oldMembersIdxAttr = op.getMembersIndexAttr();

        if (oldMembersIdxAttr)
          for (mlir::Attribute indexList : oldMembersIdxAttr) {
            llvm::SmallVector<int64_t> listVec;

            for (mlir::Attribute index : mlir::cast<mlir::ArrayAttr>(indexList))
              listVec.push_back(mlir::cast<mlir::IntegerAttr>(index).getInt());

            newMemberIndices.emplace_back(std::move(listVec));
          }

        for (int64_t newFieldIdx : fieldIndicies)
          newMemberIndices.emplace_back(
              llvm::SmallVector<int64_t>(1, newFieldIdx));

        op.setMembersIndexAttr(builder.create2DI64ArrayAttr(newMemberIndices));
        // Only some fields of the parent are mapped via members, so mark the
        // parent map as partial.
        op.setPartialMap(true);

        return mlir::WalkResult::advance();
      });
802
803 func->walk([&](mlir::omp::MapInfoOp op) {
804 if (!op.getMembers().empty())
805 return;
806
807 if (!mlir::isa<fir::BoxCharType>(fir::unwrapRefType(op.getVarType())))
808 return;
809
810 // POSSIBLE_HACK_ALERT: If the boxchar has been implicitly mapped then
811 // it is likely that the underlying pointer to the data
812 // (!fir.ref<fir.char<k,?>>) has already been mapped. So, skip such
813 // boxchars. We are primarily interested in boxchars that were mapped
814 // by passes such as MapsForPrivatizedSymbols that map boxchars that
815 // are privatized. At present, such boxchar maps are not marked
816 // implicit. Should they be? I don't know. If they should be then
817 // we need to change this check for early return OR live with
818 // over-mapping.
819 bool hasImplicitMap =
820 (llvm::omp::OpenMPOffloadMappingFlags(op.getMapType()) &
821 llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT) ==
822 llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
823 if (hasImplicitMap)
824 return;
825
826 assert(llvm::hasSingleElement(op->getUsers()) &&
827 "OMPMapInfoFinalization currently only supports single users "
828 "of a MapInfoOp");
829
830 builder.setInsertionPoint(op);
831 genBoxcharMemberMap(op, builder);
832 });
833
834 func->walk([&](mlir::omp::MapInfoOp op) {
835 // TODO: Currently only supports a single user for the MapInfoOp. This
836 // is fine for the moment, as the Fortran frontend will generate a
837 // new MapInfoOp with at most one user currently. In the case of
838 // members of other objects, like derived types, the user would be the
839 // parent. In cases where it's a regular non-member map, the user would
840 // be the target operation it is being mapped by.
841 //
842 // However, when/if we optimise/cleanup the IR we will have to extend
843 // this pass to support multiple users, as we may wish to have a map
844 // be re-used by multiple users (e.g. across multiple targets that map
845 // the variable and have identical map properties).
846 assert(llvm::hasSingleElement(op->getUsers()) &&
847 "OMPMapInfoFinalization currently only supports single users "
848 "of a MapInfoOp");
849
850 if (fir::isTypeWithDescriptor(op.getVarType()) ||
851 mlir::isa_and_present<fir::BoxAddrOp>(
852 op.getVarPtr().getDefiningOp())) {
853 builder.setInsertionPoint(op);
854 mlir::Operation *targetUser = getFirstTargetUser(op);
855 assert(targetUser && "expected user of map operation was not found");
856 genDescriptorMemberMaps(op, builder, targetUser);
857 }
858 });
859
860 // Wait until after we have generated all of our maps to add them onto
861 // the target's block arguments, simplifying the process as there would be
862 // no need to avoid accidental duplicate additions.
863 func->walk([&](mlir::omp::MapInfoOp op) {
864 mlir::Operation *targetUser = getFirstTargetUser(op);
865 assert(targetUser && "expected user of map operation was not found");
866 addImplicitMembersToTarget(op, builder, targetUser);
867 });
868 });
869 }
870};
871
872} // namespace
873

// Source: flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp