//===-- Utils.cpp -----------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
10//
11//===----------------------------------------------------------------------===//
12
#include "Utils.h"

#include "ClauseFinder.h"
#include "flang/Lower/OpenMP/Clauses.h"
#include <flang/Lower/AbstractConverter.h>
#include <flang/Lower/ConvertType.h>
#include <flang/Lower/DirectivesCommon.h>
#include <flang/Lower/PFTBuilder.h>
#include <flang/Optimizer/Builder/FIRBuilder.h>
#include <flang/Optimizer/Builder/Todo.h>
#include <flang/Parser/parse-tree.h>
#include <flang/Parser/tools.h>
#include <flang/Semantics/tools.h>
#include <llvm/Support/CommandLine.h>

#include <algorithm>
#include <iterator>
29
30llvm::cl::opt<bool> treatIndexAsSection(
31 "openmp-treat-index-as-section",
32 llvm::cl::desc("In the OpenMP data clauses treat `a(N)` as `a(N:N)`."),
33 llvm::cl::init(Val: true));
34
35namespace Fortran {
36namespace lower {
37namespace omp {
38
39int64_t getCollapseValue(const List<Clause> &clauses) {
40 auto iter = llvm::find_if(clauses, [](const Clause &clause) {
41 return clause.id == llvm::omp::Clause::OMPC_collapse;
42 });
43 if (iter != clauses.end()) {
44 const auto &collapse = std::get<clause::Collapse>(iter->u);
45 return evaluate::ToInt64(collapse.v).value();
46 }
47 return 1;
48}
49
50void genObjectList(const ObjectList &objects,
51 lower::AbstractConverter &converter,
52 llvm::SmallVectorImpl<mlir::Value> &operands) {
53 for (const Object &object : objects) {
54 const semantics::Symbol *sym = object.sym();
55 assert(sym && "Expected Symbol");
56 if (mlir::Value variable = converter.getSymbolAddress(*sym)) {
57 operands.push_back(variable);
58 } else if (const auto *details =
59 sym->detailsIf<semantics::HostAssocDetails>()) {
60 operands.push_back(converter.getSymbolAddress(details->symbol()));
61 converter.copySymbolBinding(details->symbol(), *sym);
62 }
63 }
64}
65
66mlir::Type getLoopVarType(lower::AbstractConverter &converter,
67 std::size_t loopVarTypeSize) {
68 // OpenMP runtime requires 32-bit or 64-bit loop variables.
69 loopVarTypeSize = loopVarTypeSize * 8;
70 if (loopVarTypeSize < 32) {
71 loopVarTypeSize = 32;
72 } else if (loopVarTypeSize > 64) {
73 loopVarTypeSize = 64;
74 mlir::emitWarning(converter.getCurrentLocation(),
75 "OpenMP loop iteration variable cannot have more than 64 "
76 "bits size and will be narrowed into 64 bits.");
77 }
78 assert((loopVarTypeSize == 32 || loopVarTypeSize == 64) &&
79 "OpenMP loop iteration variable size must be transformed into 32-bit "
80 "or 64-bit");
81 return converter.getFirOpBuilder().getIntegerType(loopVarTypeSize);
82}
83
// Return the symbol of the iteration variable of the DO construct held by
// \p eval, or nullptr when the evaluation is not a DO construct with explicit
// bounds (i.e. no LoopControl::Bounds alternative is present).
semantics::Symbol *
getIterationVariableSymbol(const lower::pft::Evaluation &eval) {
  return eval.visit(common::visitors{
      [&](const parser::DoConstruct &doLoop) {
        if (const auto &maybeCtrl = doLoop.GetLoopControl()) {
          using LoopControl = parser::LoopControl;
          if (auto *bounds = std::get_if<LoopControl::Bounds>(&maybeCtrl->u)) {
            // The bounds name is a Scalar<Name> wrapper; unwrap it via
            // `.thing` to reach the underlying parser::Name and its symbol.
            static_assert(std::is_same_v<decltype(bounds->name),
                                         parser::Scalar<parser::Name>>);
            return bounds->name.thing.symbol;
          }
        }
        return static_cast<semantics::Symbol *>(nullptr);
      },
      // Any other evaluation kind has no iteration variable.
      [](auto &&) { return static_cast<semantics::Symbol *>(nullptr); },
  });
}
101
102void gatherFuncAndVarSyms(
103 const ObjectList &objects, mlir::omp::DeclareTargetCaptureClause clause,
104 llvm::SmallVectorImpl<DeclareTargetCapturePair> &symbolAndClause) {
105 for (const Object &object : objects)
106 symbolAndClause.emplace_back(clause, *object.sym());
107}
108
// Build an omp.map.info operation describing how \p baseAddr is mapped to the
// device: its pointee type, map flags, capture kind, bounds, member maps and
// their indices, an optional user-defined mapper, and the Fortran source name.
// Returns the created operation.
mlir::omp::MapInfoOp
createMapInfoOp(fir::FirOpBuilder &builder, mlir::Location loc,
                mlir::Value baseAddr, mlir::Value varPtrPtr,
                llvm::StringRef name, llvm::ArrayRef<mlir::Value> bounds,
                llvm::ArrayRef<mlir::Value> members,
                mlir::ArrayAttr membersIndex, uint64_t mapType,
                mlir::omp::VariableCaptureKind mapCaptureType, mlir::Type retTy,
                bool partialMap, mlir::FlatSymbolRefAttr mapperId) {
  // Boxed (descriptor-carrying) values are unwrapped to the raw base address;
  // the result type is updated to match the unwrapped address.
  if (auto boxTy = llvm::dyn_cast<fir::BaseBoxType>(baseAddr.getType())) {
    baseAddr = builder.create<fir::BoxAddrOp>(loc, baseAddr);
    retTy = baseAddr.getType();
  }

  // The var_type attribute holds the pointee type of the mapped address.
  mlir::TypeAttr varType = mlir::TypeAttr::get(
      llvm::cast<mlir::omp::PointerLikeType>(retTy).getElementType());

  // For types with unknown extents such as <2x?xi32> we discard the incomplete
  // type info and only retain the base type. The correct dimensions are later
  // recovered through the bounds info.
  if (auto seqType = llvm::dyn_cast<fir::SequenceType>(varType.getValue()))
    if (seqType.hasDynamicExtents())
      varType = mlir::TypeAttr::get(seqType.getEleTy());

  mlir::omp::MapInfoOp op = builder.create<mlir::omp::MapInfoOp>(
      loc, retTy, baseAddr, varType,
      builder.getIntegerAttr(builder.getIntegerType(64, false), mapType),
      builder.getAttr<mlir::omp::VariableCaptureKindAttr>(mapCaptureType),
      varPtrPtr, members, membersIndex, bounds, mapperId,
      builder.getStringAttr(name), builder.getBoolAttr(partialMap));
  return op;
}
140
141// This function gathers the individual omp::Object's that make up a
142// larger omp::Object symbol.
143//
144// For example, provided the larger symbol: "parent%child%member", this
145// function breaks it up into its constituent components ("parent",
146// "child", "member"), so we can access each individual component and
147// introspect details. Important to note is this function breaks it up from
148// RHS to LHS ("member" to "parent") and then we reverse it so that the
149// returned omp::ObjectList is LHS to RHS, with the "parent" at the
150// beginning.
151omp::ObjectList gatherObjectsOf(omp::Object derivedTypeMember,
152 semantics::SemanticsContext &semaCtx) {
153 omp::ObjectList objList;
154 std::optional<omp::Object> baseObj = derivedTypeMember;
155 while (baseObj.has_value()) {
156 objList.push_back(baseObj.value());
157 baseObj = getBaseObject(baseObj.value(), semaCtx);
158 }
159 return omp::ObjectList{llvm::reverse(objList)};
160}
161
162// This function generates a series of indices from a provided omp::Object,
163// that devolves to an ArrayRef symbol, e.g. "array(2,3,4)", this function
164// would generate a series of indices of "[1][2][3]" for the above example,
165// offsetting by -1 to account for the non-zero fortran indexes.
166//
167// These indices can then be provided to a coordinate operation or other
168// GEP-like operation to access the relevant positional member of the
169// array.
170//
171// It is of note that the function only supports subscript integers currently
172// and not Triplets i.e. Array(1:2:3).
173static void generateArrayIndices(lower::AbstractConverter &converter,
174 fir::FirOpBuilder &firOpBuilder,
175 lower::StatementContext &stmtCtx,
176 mlir::Location clauseLocation,
177 llvm::SmallVectorImpl<mlir::Value> &indices,
178 omp::Object object) {
179 auto maybeRef = evaluate::ExtractDataRef(*object.ref());
180 if (!maybeRef)
181 return;
182
183 auto *arr = std::get_if<evaluate::ArrayRef>(&maybeRef->u);
184 if (!arr)
185 return;
186
187 for (auto v : arr->subscript()) {
188 if (std::holds_alternative<Triplet>(v.u))
189 TODO(clauseLocation, "Triplet indexing in map clause is unsupported");
190
191 auto expr = std::get<Fortran::evaluate::IndirectSubscriptIntegerExpr>(v.u);
192 mlir::Value subscript =
193 fir::getBase(converter.genExprValue(toEvExpr(expr.value()), stmtCtx));
194 mlir::Value one = firOpBuilder.createIntegerConstant(
195 clauseLocation, firOpBuilder.getIndexType(), 1);
196 subscript = firOpBuilder.createConvert(
197 clauseLocation, firOpBuilder.getIndexType(), subscript);
198 indices.push_back(firOpBuilder.create<mlir::arith::SubIOp>(clauseLocation,
199 subscript, one));
200 }
201}
202
203/// When mapping members of derived types, there is a chance that one of the
204/// members along the way to a mapped member is an descriptor. In which case
205/// we have to make sure we generate a map for those along the way otherwise
206/// we will be missing a chunk of data required to actually map the member
207/// type to device. This function effectively generates these maps and the
208/// appropriate data accesses required to generate these maps. It will avoid
209/// creating duplicate maps, as duplicates are just as bad as unmapped
210/// descriptor data in a lot of cases for the runtime (and unnecessary
211/// data movement should be avoided where possible).
212///
213/// As an example for the following mapping:
214///
215/// type :: vertexes
216/// integer(4), allocatable :: vertexx(:)
217/// integer(4), allocatable :: vertexy(:)
218/// end type vertexes
219///
220/// type :: dtype
221/// real(4) :: i
222/// type(vertexes), allocatable :: vertexes(:)
223/// end type dtype
224///
225/// type(dtype), allocatable :: alloca_dtype
226///
227/// !$omp target map(tofrom: alloca_dtype%vertexes(N1)%vertexx)
228///
229/// The below HLFIR/FIR is generated (trimmed for conciseness):
230///
231/// On the first iteration we index into the record type alloca_dtype
232/// to access "vertexes", we then generate a map for this descriptor
233/// alongside bounds to indicate we only need the 1 member, rather than
234/// the whole array block in this case (In theory we could map its
235/// entirety at the cost of data transfer bandwidth).
236///
237/// %13:2 = hlfir.declare ... "alloca_dtype" ...
238/// %39 = fir.load %13#0 : ...
239/// %40 = fir.coordinate_of %39, %c1 : ...
240/// %51 = omp.map.info var_ptr(%40 : ...) map_clauses(to) capture(ByRef) ...
241/// %52 = fir.load %40 : ...
242///
243/// Second iteration generating access to "vertexes(N1) utilising the N1 index
244/// %53 = load N1 ...
245/// %54 = fir.convert %53 : (i32) -> i64
246/// %55 = fir.convert %54 : (i64) -> index
247/// %56 = arith.subi %55, %c1 : index
248/// %57 = fir.coordinate_of %52, %56 : ...
249///
250/// Still in the second iteration we access the allocatable member "vertexx",
251/// we return %58 from the function and provide it to the final and "main"
252/// map of processMap (generated by the record type segment of the below
253/// function), if this were not the final symbol in the list, i.e. we accessed
254/// a member below vertexx, we would have generated the map below as we did in
255/// the first iteration and then continue to generate further coordinates to
256/// access further components as required.
257///
258/// %58 = fir.coordinate_of %57, %c0 : ...
259/// %61 = omp.map.info var_ptr(%58 : ...) map_clauses(to) capture(ByRef) ...
260///
261/// Parent mapping containing prior generated mapped members, generated at
262/// a later step but here to showcase the "end" result
263///
264/// omp.map.info var_ptr(%13#1 : ...) map_clauses(to) capture(ByRef)
265/// members(%50, %61 : [0, 1, 0], [0, 1, 0] : ...
266///
267/// \param objectList - The list of omp::Object symbol data for each parent
268/// to the mapped member (also includes the mapped member), generated via
269/// gatherObjectsOf.
270/// \param indices - List of index data associated with the mapped member
271/// symbol, which identifies the placement of the member in its parent,
272/// this helps generate the appropriate member accesses. These indices
273/// can be generated via generateMemberPlacementIndices.
274/// \param asFortran - A string generated from the mapped variable to be
275/// associated with the main map, generally (but not restricted to)
276/// generated via gatherDataOperandAddrAndBounds or other
277/// DirectiveCommons.hpp utilities.
278/// \param mapTypeBits - The map flags that will be associated with the
279/// generated maps, minus alterations of the TO and FROM bits for the
280/// intermediate components to prevent accidental overwriting on device
281/// write back.
282mlir::Value createParentSymAndGenIntermediateMaps(
283 mlir::Location clauseLocation, lower::AbstractConverter &converter,
284 semantics::SemanticsContext &semaCtx, lower::StatementContext &stmtCtx,
285 omp::ObjectList &objectList, llvm::SmallVectorImpl<int64_t> &indices,
286 OmpMapParentAndMemberData &parentMemberIndices, llvm::StringRef asFortran,
287 llvm::omp::OpenMPOffloadMappingFlags mapTypeBits) {
288 fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
289
290 /// Checks if an omp::Object is an array expression with a subscript, e.g.
291 /// array(1,2).
292 auto isArrayExprWithSubscript = [](omp::Object obj) {
293 if (auto maybeRef = evaluate::ExtractDataRef(obj.ref())) {
294 evaluate::DataRef ref = *maybeRef;
295 if (auto *arr = std::get_if<evaluate::ArrayRef>(&ref.u))
296 return !arr->subscript().empty();
297 }
298 return false;
299 };
300
301 // Generate the access to the original parent base address.
302 fir::factory::AddrAndBoundsInfo parentBaseAddr =
303 lower::getDataOperandBaseAddr(converter, firOpBuilder,
304 *objectList[0].sym(), clauseLocation);
305 mlir::Value curValue = parentBaseAddr.addr;
306
307 // Iterate over all objects in the objectList, this should consist of all
308 // record types between the parent and the member being mapped (including
309 // the parent). The object list may also contain array objects as well,
310 // this can occur when specifying bounds or a specific element access
311 // within a member map, we skip these.
312 size_t currentIndicesIdx = 0;
313 for (size_t i = 0; i < objectList.size(); ++i) {
314 // If we encounter a sequence type, i.e. an array, we must generate the
315 // correct coordinate operation to index into the array to proceed further,
316 // this is only relevant in cases where we encounter subscripts currently.
317 //
318 // For example in the following case:
319 //
320 // map(tofrom: array_dtype(4)%internal_dtypes(3)%float_elements(4))
321 //
322 // We must generate coordinate operation accesses for each subscript
323 // we encounter.
324 if (fir::SequenceType arrType = mlir::dyn_cast<fir::SequenceType>(
325 fir::unwrapPassByRefType(curValue.getType()))) {
326 if (isArrayExprWithSubscript(objectList[i])) {
327 llvm::SmallVector<mlir::Value> subscriptIndices;
328 generateArrayIndices(converter, firOpBuilder, stmtCtx, clauseLocation,
329 subscriptIndices, objectList[i]);
330 assert(!subscriptIndices.empty() &&
331 "missing expected indices for map clause");
332 curValue = firOpBuilder.create<fir::CoordinateOp>(
333 clauseLocation, firOpBuilder.getRefType(arrType.getEleTy()),
334 curValue, subscriptIndices);
335 }
336 }
337
338 // If we encounter a record type, we must access the subsequent member
339 // by indexing into it and creating a coordinate operation to do so, we
340 // utilise the index information generated previously and passed in to
341 // work out the correct member to access and the corresponding member
342 // type.
343 if (fir::RecordType recordType = mlir::dyn_cast<fir::RecordType>(
344 fir::unwrapPassByRefType(curValue.getType()))) {
345 fir::IntOrValue idxConst = mlir::IntegerAttr::get(
346 firOpBuilder.getI32Type(), indices[currentIndicesIdx]);
347 mlir::Type memberTy = recordType.getType(indices[currentIndicesIdx]);
348 curValue = firOpBuilder.create<fir::CoordinateOp>(
349 clauseLocation, firOpBuilder.getRefType(memberTy), curValue,
350 llvm::SmallVector<fir::IntOrValue, 1>{idxConst});
351
352 // If we're a final member, the map will be generated by the processMap
353 // call that invoked this function.
354 if (currentIndicesIdx == indices.size() - 1)
355 break;
356
357 // Skip mapping and the subsequent load if we're not
358 // a type with a descriptor such as a pointer/allocatable. If we're not a
359 // type with a descriptor then we have no need of generating an
360 // intermediate map for it, as we only need to generate a map if a member
361 // is a descriptor type (and thus obscures the members it contains via a
362 // pointer in which it's data needs mapped).
363 if (!fir::isTypeWithDescriptor(memberTy)) {
364 currentIndicesIdx++;
365 continue;
366 }
367
368 llvm::SmallVector<int64_t> interimIndices(
369 indices.begin(), std::next(x: indices.begin(), n: currentIndicesIdx + 1));
370 // Verify we haven't already created a map for this particular member, by
371 // checking the list of members already mapped for the current parent,
372 // stored in the parentMemberIndices structure
373 if (!parentMemberIndices.isDuplicateMemberMapInfo(memberIndices&: interimIndices)) {
374 // Generate bounds operations using the standard lowering utility,
375 // unfortunately this currently does a bit more than just generate
376 // bounds and we discard the other bits. May be useful to extend the
377 // utility to just provide bounds in the future.
378 llvm::SmallVector<mlir::Value> interimBounds;
379 if (i + 1 < objectList.size() &&
380 objectList[i + 1].sym()->IsObjectArray()) {
381 std::stringstream interimFortran;
382 Fortran::lower::gatherDataOperandAddrAndBounds<
383 mlir::omp::MapBoundsOp, mlir::omp::MapBoundsType>(
384 converter, converter.getFirOpBuilder(), semaCtx,
385 converter.getFctCtx(), *objectList[i + 1].sym(),
386 objectList[i + 1].ref(), clauseLocation, interimFortran,
387 interimBounds, treatIndexAsSection);
388 }
389
390 // Remove all map-type bits (e.g. TO, FROM, etc.) from the intermediate
391 // allocatable maps, as we simply wish to alloc or release them. It may
392 // be safer to just pass OMP_MAP_NONE as the map type, but we may still
393 // need some of the other map types the mapped member utilises, so for
394 // now it's good to keep an eye on this.
395 llvm::omp::OpenMPOffloadMappingFlags interimMapType = mapTypeBits;
396 interimMapType &= ~llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
397 interimMapType &= ~llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
398 interimMapType &=
399 ~llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM;
400
401 // Create a map for the intermediate member and insert it and it's
402 // indices into the parentMemberIndices list to track it.
403 mlir::omp::MapInfoOp mapOp = createMapInfoOp(
404 firOpBuilder, clauseLocation, curValue,
405 /*varPtrPtr=*/mlir::Value{}, asFortran,
406 /*bounds=*/interimBounds,
407 /*members=*/{},
408 /*membersIndex=*/mlir::ArrayAttr{},
409 static_cast<
410 std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
411 interimMapType),
412 mlir::omp::VariableCaptureKind::ByRef, curValue.getType());
413
414 parentMemberIndices.memberPlacementIndices.push_back(Elt: interimIndices);
415 parentMemberIndices.memberMap.push_back(mapOp);
416 }
417
418 // Load the currently accessed member, so we can continue to access
419 // further segments.
420 curValue = firOpBuilder.create<fir::LoadOp>(clauseLocation, curValue);
421 currentIndicesIdx++;
422 }
423 }
424
425 return curValue;
426}
427
428static int64_t
429getComponentPlacementInParent(const semantics::Symbol *componentSym) {
430 const auto *derived = componentSym->owner()
431 .derivedTypeSpec()
432 ->typeSymbol()
433 .detailsIf<semantics::DerivedTypeDetails>();
434 assert(derived &&
435 "expected derived type details when processing component symbol");
436 for (auto [placement, name] : llvm::enumerate(derived->componentNames()))
437 if (name == componentSym->name())
438 return placement;
439 return -1;
440}
441
442static std::optional<Object>
443getComponentObject(std::optional<Object> object,
444 semantics::SemanticsContext &semaCtx) {
445 if (!object)
446 return std::nullopt;
447
448 auto ref = evaluate::ExtractDataRef(object.value().ref());
449 if (!ref)
450 return std::nullopt;
451
452 if (std::holds_alternative<evaluate::Component>(ref->u))
453 return object;
454
455 auto baseObj = getBaseObject(object.value(), semaCtx);
456 if (!baseObj)
457 return std::nullopt;
458
459 return getComponentObject(baseObj.value(), semaCtx);
460}
461
462void generateMemberPlacementIndices(const Object &object,
463 llvm::SmallVectorImpl<int64_t> &indices,
464 semantics::SemanticsContext &semaCtx) {
465 assert(indices.empty() && "indices vector passed to "
466 "generateMemberPlacementIndices should be empty");
467 auto compObj = getComponentObject(object, semaCtx);
468
469 while (compObj) {
470 int64_t index = getComponentPlacementInParent(compObj->sym());
471 assert(
472 index >= 0 &&
473 "unexpected index value returned from getComponentPlacementInParent");
474 indices.push_back(Elt: index);
475 compObj =
476 getComponentObject(getBaseObject(compObj.value(), semaCtx), semaCtx);
477 }
478
479 indices = llvm::SmallVector<int64_t>{llvm::reverse(C&: indices)};
480}
481
482void OmpMapParentAndMemberData::addChildIndexAndMapToParent(
483 const omp::Object &object, mlir::omp::MapInfoOp &mapOp,
484 semantics::SemanticsContext &semaCtx) {
485 llvm::SmallVector<int64_t> indices;
486 generateMemberPlacementIndices(object, indices, semaCtx);
487 memberPlacementIndices.push_back(Elt: indices);
488 memberMap.push_back(mapOp);
489}
490
491bool isMemberOrParentAllocatableOrPointer(
492 const Object &object, semantics::SemanticsContext &semaCtx) {
493 if (semantics::IsAllocatableOrObjectPointer(object.sym()))
494 return true;
495
496 auto compObj = getBaseObject(object, semaCtx);
497 while (compObj) {
498 if (semantics::IsAllocatableOrObjectPointer(compObj.value().sym()))
499 return true;
500 compObj = getBaseObject(compObj.value(), semaCtx);
501 }
502
503 return false;
504}
505
506void insertChildMapInfoIntoParent(
507 lower::AbstractConverter &converter, semantics::SemanticsContext &semaCtx,
508 lower::StatementContext &stmtCtx,
509 std::map<Object, OmpMapParentAndMemberData> &parentMemberIndices,
510 llvm::SmallVectorImpl<mlir::Value> &mapOperands,
511 llvm::SmallVectorImpl<const semantics::Symbol *> &mapSyms) {
512 fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
513 for (auto indices : parentMemberIndices) {
514 auto *parentIter =
515 llvm::find_if(mapSyms, [&indices](const semantics::Symbol *v) {
516 return v == indices.first.sym();
517 });
518 if (parentIter != mapSyms.end()) {
519 auto mapOp = llvm::cast<mlir::omp::MapInfoOp>(
520 mapOperands[std::distance(mapSyms.begin(), parentIter)]
521 .getDefiningOp());
522
523 // NOTE: To maintain appropriate SSA ordering, we move the parent map
524 // which will now have references to its children after the last
525 // of its members to be generated. This is necessary when a user
526 // has defined a series of parent and children maps where the parent
527 // precedes the children. An alternative, may be to do
528 // delayed generation of map info operations from the clauses and
529 // organize them first before generation. Or to use the
530 // topologicalSort utility which will enforce a stronger SSA
531 // dominance ordering at the cost of efficiency/time.
532 mapOp->moveAfter(indices.second.memberMap.back());
533
534 for (mlir::omp::MapInfoOp memberMap : indices.second.memberMap)
535 mapOp.getMembersMutable().append(memberMap.getResult());
536
537 mapOp.setMembersIndexAttr(firOpBuilder.create2DI64ArrayAttr(
538 indices.second.memberPlacementIndices));
539 } else {
540 // NOTE: We take the map type of the first child, this may not
541 // be the correct thing to do, however, we shall see. For the moment
542 // it allows this to work with enter and exit without causing MLIR
543 // verification issues. The more appropriate thing may be to take
544 // the "main" map type clause from the directive being used.
545 uint64_t mapType = indices.second.memberMap[0].getMapType();
546
547 llvm::SmallVector<mlir::Value> members;
548 members.reserve(indices.second.memberMap.size());
549 for (mlir::omp::MapInfoOp memberMap : indices.second.memberMap)
550 members.push_back(memberMap.getResult());
551
552 // Create parent to emplace and bind members
553 llvm::SmallVector<mlir::Value> bounds;
554 std::stringstream asFortran;
555 fir::factory::AddrAndBoundsInfo info =
556 lower::gatherDataOperandAddrAndBounds<mlir::omp::MapBoundsOp,
557 mlir::omp::MapBoundsType>(
558 converter, firOpBuilder, semaCtx, converter.getFctCtx(),
559 *indices.first.sym(), indices.first.ref(),
560 converter.getCurrentLocation(), asFortran, bounds,
561 treatIndexAsSection);
562
563 mlir::omp::MapInfoOp mapOp = createMapInfoOp(
564 firOpBuilder, info.rawInput.getLoc(), info.rawInput,
565 /*varPtrPtr=*/mlir::Value(), asFortran.str(), bounds, members,
566 firOpBuilder.create2DI64ArrayAttr(
567 indices.second.memberPlacementIndices),
568 mapType, mlir::omp::VariableCaptureKind::ByRef,
569 info.rawInput.getType(),
570 /*partialMap=*/true);
571
572 mapOperands.push_back(mapOp);
573 mapSyms.push_back(indices.first.sym());
574 }
575 }
576}
577
578void lastprivateModifierNotSupported(const omp::clause::Lastprivate &lastp,
579 mlir::Location loc) {
580 using Lastprivate = omp::clause::Lastprivate;
581 auto &maybeMod =
582 std::get<std::optional<Lastprivate::LastprivateModifier>>(lastp.t);
583 if (maybeMod) {
584 assert(*maybeMod == Lastprivate::LastprivateModifier::Conditional &&
585 "Unexpected lastprivate modifier");
586 TODO(loc, "lastprivate clause with CONDITIONAL modifier");
587 }
588}
589
590static void convertLoopBounds(lower::AbstractConverter &converter,
591 mlir::Location loc,
592 mlir::omp::LoopRelatedClauseOps &result,
593 std::size_t loopVarTypeSize) {
594 fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
595 // The types of lower bound, upper bound, and step are converted into the
596 // type of the loop variable if necessary.
597 mlir::Type loopVarType = getLoopVarType(converter, loopVarTypeSize);
598 for (unsigned it = 0; it < (unsigned)result.loopLowerBounds.size(); it++) {
599 result.loopLowerBounds[it] = firOpBuilder.createConvert(
600 loc, loopVarType, result.loopLowerBounds[it]);
601 result.loopUpperBounds[it] = firOpBuilder.createConvert(
602 loc, loopVarType, result.loopUpperBounds[it]);
603 result.loopSteps[it] =
604 firOpBuilder.createConvert(loc, loopVarType, result.loopSteps[it]);
605 }
606}
607
608bool collectLoopRelatedInfo(
609 lower::AbstractConverter &converter, mlir::Location currentLocation,
610 lower::pft::Evaluation &eval, const omp::List<omp::Clause> &clauses,
611 mlir::omp::LoopRelatedClauseOps &result,
612 llvm::SmallVectorImpl<const semantics::Symbol *> &iv) {
613 bool found = false;
614 fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
615
616 // Collect the loops to collapse.
617 lower::pft::Evaluation *doConstructEval = &eval.getFirstNestedEvaluation();
618 if (doConstructEval->getIf<parser::DoConstruct>()->IsDoConcurrent()) {
619 TODO(currentLocation, "Do Concurrent in Worksharing loop construct");
620 }
621
622 std::int64_t collapseValue = 1l;
623 if (auto *clause =
624 ClauseFinder::findUniqueClause<omp::clause::Collapse>(clauses)) {
625 collapseValue = evaluate::ToInt64(clause->v).value();
626 found = true;
627 }
628
629 std::size_t loopVarTypeSize = 0;
630 do {
631 lower::pft::Evaluation *doLoop =
632 &doConstructEval->getFirstNestedEvaluation();
633 auto *doStmt = doLoop->getIf<parser::NonLabelDoStmt>();
634 assert(doStmt && "Expected do loop to be in the nested evaluation");
635 const auto &loopControl =
636 std::get<std::optional<parser::LoopControl>>(doStmt->t);
637 const parser::LoopControl::Bounds *bounds =
638 std::get_if<parser::LoopControl::Bounds>(&loopControl->u);
639 assert(bounds && "Expected bounds for worksharing do loop");
640 lower::StatementContext stmtCtx;
641 result.loopLowerBounds.push_back(fir::getBase(
642 converter.genExprValue(*semantics::GetExpr(bounds->lower), stmtCtx)));
643 result.loopUpperBounds.push_back(fir::getBase(
644 converter.genExprValue(*semantics::GetExpr(bounds->upper), stmtCtx)));
645 if (bounds->step) {
646 result.loopSteps.push_back(fir::getBase(
647 converter.genExprValue(*semantics::GetExpr(bounds->step), stmtCtx)));
648 } else { // If `step` is not present, assume it as `1`.
649 result.loopSteps.push_back(firOpBuilder.createIntegerConstant(
650 currentLocation, firOpBuilder.getIntegerType(32), 1));
651 }
652 iv.push_back(Elt: bounds->name.thing.symbol);
653 loopVarTypeSize = std::max(loopVarTypeSize,
654 bounds->name.thing.symbol->GetUltimate().size());
655 collapseValue--;
656 doConstructEval =
657 &*std::next(doConstructEval->getNestedEvaluations().begin());
658 } while (collapseValue > 0);
659
660 convertLoopBounds(converter, currentLocation, result, loopVarTypeSize);
661
662 return found;
663}
664} // namespace omp
665} // namespace lower
666} // namespace Fortran
667

source code of flang/lib/Lower/OpenMP/Utils.cpp