1//===-- DebugTypeGenerator.cpp -- type conversion ---------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
10//
11//===----------------------------------------------------------------------===//
12
13#define DEBUG_TYPE "flang-debug-type-generator"
14
15#include "DebugTypeGenerator.h"
16#include "flang/Optimizer/CodeGen/DescriptorModel.h"
17#include "flang/Optimizer/Support/InternalNames.h"
18#include "flang/Optimizer/Support/Utils.h"
19#include "mlir/Pass/Pass.h"
20#include "llvm/ADT/ScopeExit.h"
21#include "llvm/BinaryFormat/Dwarf.h"
22#include "llvm/Support/Debug.h"
23
24namespace fir {
25
/// Calculate offset of any field in the descriptor.
/// Recursively computes where the previous field ends (offset + size) and
/// rounds that up to the ABI alignment of the requested field's LLVM type,
/// mirroring the layout the target data layout would produce for the
/// descriptor struct.
template <int DescriptorField>
std::uint64_t getComponentOffset(const mlir::DataLayout &dl,
                                 mlir::MLIRContext *context,
                                 mlir::Type llvmFieldType) {
  // Fields 1..9 only; field 0 is handled by the base-case specialization.
  static_assert(DescriptorField > 0 && DescriptorField < 10);
  mlir::Type previousFieldType =
      getDescFieldTypeModel<DescriptorField - 1>()(context);
  std::uint64_t previousOffset =
      getComponentOffset<DescriptorField - 1>(dl, context, previousFieldType);
  // End of the previous field = its offset plus its size.
  std::uint64_t offset = previousOffset + dl.getTypeSize(previousFieldType);
  // Align up to this field's ABI alignment to get its start offset.
  std::uint64_t fieldAlignment = dl.getTypeABIAlignment(llvmFieldType);
  return llvm::alignTo(offset, fieldAlignment);
}
/// Recursion base case: the first descriptor field lives at offset zero.
template <>
std::uint64_t getComponentOffset<0>(const mlir::DataLayout &dl,
                                    mlir::MLIRContext *context,
                                    mlir::Type llvmFieldType) {
  return 0;
}
46
/// Construct the generator for module `m`, pre-computing the descriptor
/// field offsets/sizes (per the module's data layout) that are later needed
/// to build DWARF expressions reading bounds, lengths and ranks out of
/// runtime descriptors.
DebugTypeGenerator::DebugTypeGenerator(mlir::ModuleOp m,
                                       mlir::SymbolTable *symbolTable_,
                                       const mlir::DataLayout &dl)
    : module(m), symbolTable(symbolTable_), dataLayout{&dl},
      kindMapping(getKindMapping(m)), llvmTypeConverter(m, false, false, dl) {
  LLVM_DEBUG(llvm::dbgs() << "DITypeAttr generator\n");

  mlir::MLIRContext *context = module.getContext();

  // The debug information requires the offset of certain fields in the
  // descriptors like lower_bound and extent for each dimension.
  mlir::Type llvmDimsType = getDescFieldTypeModel<kDimsPosInBox>()(context);
  mlir::Type llvmPtrType = getDescFieldTypeModel<kAddrPosInBox>()(context);
  mlir::Type llvmLenType = getDescFieldTypeModel<kElemLenPosInBox>()(context);
  mlir::Type llvmRankType = getDescFieldTypeModel<kRankPosInBox>()(context);

  // Cache offsets/sizes once; they are reused for every type conversion.
  dimsOffset =
      getComponentOffset<kDimsPosInBox>(*dataLayout, context, llvmDimsType);
  dimsSize = dataLayout->getTypeSize(llvmDimsType);
  ptrSize = dataLayout->getTypeSize(llvmPtrType);
  rankSize = dataLayout->getTypeSize(llvmRankType);
  lenOffset =
      getComponentOffset<kElemLenPosInBox>(*dataLayout, context, llvmLenType);
  rankOffset =
      getComponentOffset<kRankPosInBox>(*dataLayout, context, llvmRankType);
}
73
74static mlir::LLVM::DITypeAttr genBasicType(mlir::MLIRContext *context,
75 mlir::StringAttr name,
76 unsigned bitSize,
77 unsigned decoding) {
78 return mlir::LLVM::DIBasicTypeAttr::get(
79 context, llvm::dwarf::DW_TAG_base_type, name, bitSize, decoding);
80}
81
82static mlir::LLVM::DITypeAttr genPlaceholderType(mlir::MLIRContext *context) {
83 return genBasicType(context, mlir::StringAttr::get(context, "integer"),
84 /*bitSize=*/32, llvm::dwarf::DW_ATE_signed);
85}
86
87// Helper function to create DILocalVariableAttr and DbgValueOp when information
88// about the size or dimension of a variable etc lives in an mlir::Value.
89mlir::LLVM::DILocalVariableAttr DebugTypeGenerator::generateArtificialVariable(
90 mlir::MLIRContext *context, mlir::Value val,
91 mlir::LLVM::DIFileAttr fileAttr, mlir::LLVM::DIScopeAttr scope,
92 fir::cg::XDeclareOp declOp) {
93 // There can be multiple artificial variable for a single declOp. To help
94 // distinguish them, we pad the name with a counter. The counter is the
95 // position of 'val' in the operands of declOp.
96 auto varID = std::distance(
97 declOp.getOperands().begin(),
98 std::find(declOp.getOperands().begin(), declOp.getOperands().end(), val));
99 mlir::OpBuilder builder(context);
100 auto name = mlir::StringAttr::get(context, "." + declOp.getUniqName().str() +
101 std::to_string(varID));
102 builder.setInsertionPoint(declOp);
103 mlir::Type type = val.getType();
104 if (!mlir::isa<mlir::IntegerType>(type) || !type.isSignlessInteger()) {
105 type = builder.getIntegerType(64);
106 val = builder.create<fir::ConvertOp>(declOp.getLoc(), type, val);
107 }
108 mlir::LLVM::DITypeAttr Ty = convertType(type, fileAttr, scope, declOp);
109 auto lvAttr = mlir::LLVM::DILocalVariableAttr::get(
110 context, scope, name, fileAttr, /*line=*/0, /*argNo=*/0,
111 /*alignInBits=*/0, Ty, mlir::LLVM::DIFlags::Artificial);
112 builder.create<mlir::LLVM::DbgValueOp>(declOp.getLoc(), val, lvAttr, nullptr);
113 return lvAttr;
114}
115
/// Translate a fir.SequenceType that lives behind a descriptor (box) into a
/// DICompositeTypeAttr array type. Bounds, extents, strides and the data
/// location are not compile-time constants here; they are described by DWARF
/// expressions that read them from the descriptor at debug time.
/// 'genAllocated'/'genAssociated' request DW_AT_allocated/DW_AT_associated
/// checks used for allocatables and pointers respectively.
mlir::LLVM::DITypeAttr DebugTypeGenerator::convertBoxedSequenceType(
    fir::SequenceType seqTy, mlir::LLVM::DIFileAttr fileAttr,
    mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp,
    bool genAllocated, bool genAssociated) {

  mlir::MLIRContext *context = module.getContext();
  llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
  // 'ops' accumulates one DWARF expression at a time and is cleared after
  // each expression attribute is materialized.
  llvm::SmallVector<mlir::LLVM::DIExpressionElemAttr> ops;
  auto addOp = [&](unsigned opc, llvm::ArrayRef<uint64_t> vals) {
    ops.push_back(mlir::LLVM::DIExpressionElemAttr::get(context, opc, vals));
  };

  addOp(llvm::dwarf::DW_OP_push_object_address, {});
  addOp(llvm::dwarf::DW_OP_deref, {});

  // dataLocation = *base_addr
  mlir::LLVM::DIExpressionAttr dataLocation =
      mlir::LLVM::DIExpressionAttr::get(context, ops);
  ops.clear();

  mlir::LLVM::DITypeAttr elemTy =
      convertType(seqTy.getEleTy(), fileAttr, scope, declOp);

  // Assumed-rank arrays: the rank itself is read from the descriptor and
  // each per-dimension value comes from a DIGenericSubrangeAttr expression.
  if (seqTy.hasUnknownShape()) {
    // rank = *(base_addr + rankOffset), read with the rank field's size.
    addOp(llvm::dwarf::DW_OP_push_object_address, {});
    addOp(llvm::dwarf::DW_OP_plus_uconst, {rankOffset});
    addOp(llvm::dwarf::DW_OP_deref_size, {rankSize});
    mlir::LLVM::DIExpressionAttr rank =
        mlir::LLVM::DIExpressionAttr::get(context, ops);
    ops.clear();

    auto genSubrangeOp = [&](unsigned field) -> mlir::LLVM::DIExpressionAttr {
      // The dwarf expression for generic subrange assumes that dimension for
      // which it is being generated is already pushed on the stack. Here is the
      // formula we will use to calculate count for example.
      // *(base_addr + offset_count_0 + (dimsSize x dimension_number)).
      // where offset_count_0 is offset of the count field for the 0th dimension
      addOp(llvm::dwarf::DW_OP_push_object_address, {});
      addOp(llvm::dwarf::DW_OP_over, {});
      addOp(llvm::dwarf::DW_OP_constu, {dimsSize});
      addOp(llvm::dwarf::DW_OP_mul, {});
      addOp(llvm::dwarf::DW_OP_plus_uconst,
            {dimsOffset + ((dimsSize / 3) * field)});
      addOp(llvm::dwarf::DW_OP_plus, {});
      addOp(llvm::dwarf::DW_OP_deref, {});
      mlir::LLVM::DIExpressionAttr attr =
          mlir::LLVM::DIExpressionAttr::get(context, ops);
      ops.clear();
      return attr;
    };

    mlir::LLVM::DIExpressionAttr lowerAttr = genSubrangeOp(kDimLowerBoundPos);
    mlir::LLVM::DIExpressionAttr countAttr = genSubrangeOp(kDimExtentPos);
    mlir::LLVM::DIExpressionAttr strideAttr = genSubrangeOp(kDimStridePos);

    auto subrangeTy = mlir::LLVM::DIGenericSubrangeAttr::get(
        context, countAttr, lowerAttr, /*upperBound=*/nullptr, strideAttr);
    elements.push_back(subrangeTy);

    return mlir::LLVM::DICompositeTypeAttr::get(
        context, llvm::dwarf::DW_TAG_array_type, /*name=*/nullptr,
        /*file=*/nullptr, /*line=*/0, /*scope=*/nullptr, elemTy,
        mlir::LLVM::DIFlags::Zero, /*sizeInBits=*/0, /*alignInBits=*/0,
        elements, dataLocation, rank, /*allocated=*/nullptr,
        /*associated=*/nullptr);
  }

  addOp(llvm::dwarf::DW_OP_push_object_address, {});
  addOp(llvm::dwarf::DW_OP_deref, {});
  addOp(llvm::dwarf::DW_OP_lit0, {});
  addOp(llvm::dwarf::DW_OP_ne, {});

  // allocated = associated = (*base_addr != 0)
  mlir::LLVM::DIExpressionAttr valid =
      mlir::LLVM::DIExpressionAttr::get(context, ops);
  mlir::LLVM::DIExpressionAttr allocated = genAllocated ? valid : nullptr;
  mlir::LLVM::DIExpressionAttr associated = genAssociated ? valid : nullptr;
  ops.clear();

  // Known-rank case: one DISubrangeAttr per dimension, each reading its
  // values out of the descriptor's dims array.
  unsigned offset = dimsOffset;
  unsigned index = 0;
  mlir::IntegerType intTy = mlir::IntegerType::get(context, 64);
  const unsigned indexSize = dimsSize / 3;
  for ([[maybe_unused]] auto _ : seqTy.getShape()) {
    // For each dimension, find the offset of count, lower bound and stride in
    // the descriptor and generate the dwarf expression to extract it.
    mlir::Attribute lowerAttr = nullptr;
    // If declaration has a lower bound, use it.
    if (declOp && declOp.getShift().size() > index) {
      if (std::optional<std::int64_t> optint =
              getIntIfConstant(declOp.getShift()[index]))
        lowerAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, *optint));
      else
        // Non-constant shift: materialize it as an artificial debug variable.
        lowerAttr = generateArtificialVariable(
            context, declOp.getShift()[index], fileAttr, scope, declOp);
    }
    // FIXME: If `indexSize` happens to be bigger than address size on the
    // system then we may have to change 'DW_OP_deref' here.
    addOp(llvm::dwarf::DW_OP_push_object_address, {});
    addOp(llvm::dwarf::DW_OP_plus_uconst,
          {offset + (indexSize * kDimExtentPos)});
    addOp(llvm::dwarf::DW_OP_deref, {});
    // count[i] = *(base_addr + offset + (indexSize * kDimExtentPos))
    // where 'offset' is dimsOffset + (i * dimsSize)
    mlir::LLVM::DIExpressionAttr countAttr =
        mlir::LLVM::DIExpressionAttr::get(context, ops);
    ops.clear();

    // If a lower bound was not found in the declOp, then we will get them from
    // descriptor only for pointer and allocatable case. DWARF assumes lower
    // bound of 1 when this attribute is missing.
    if (!lowerAttr && (genAllocated || genAssociated)) {
      addOp(llvm::dwarf::DW_OP_push_object_address, {});
      addOp(llvm::dwarf::DW_OP_plus_uconst,
            {offset + (indexSize * kDimLowerBoundPos)});
      addOp(llvm::dwarf::DW_OP_deref, {});
      // lower_bound[i] = *(base_addr + offset + (indexSize *
      // kDimLowerBoundPos))
      lowerAttr = mlir::LLVM::DIExpressionAttr::get(context, ops);
      ops.clear();
    }

    addOp(llvm::dwarf::DW_OP_push_object_address, {});
    addOp(llvm::dwarf::DW_OP_plus_uconst,
          {offset + (indexSize * kDimStridePos)});
    addOp(llvm::dwarf::DW_OP_deref, {});
    // stride[i] = *(base_addr + offset + (indexSize * kDimStridePos))
    mlir::LLVM::DIExpressionAttr strideAttr =
        mlir::LLVM::DIExpressionAttr::get(context, ops);
    ops.clear();

    offset += dimsSize;
    mlir::LLVM::DISubrangeAttr subrangeTy = mlir::LLVM::DISubrangeAttr::get(
        context, countAttr, lowerAttr, /*upperBound=*/nullptr, strideAttr);
    elements.push_back(subrangeTy);
    ++index;
  }
  return mlir::LLVM::DICompositeTypeAttr::get(
      context, llvm::dwarf::DW_TAG_array_type, /*name=*/nullptr,
      /*file=*/nullptr, /*line=*/0, /*scope=*/nullptr, elemTy,
      mlir::LLVM::DIFlags::Zero, /*sizeInBits=*/0, /*alignInBits=*/0, elements,
      dataLocation, /*rank=*/nullptr, allocated, associated);
}
260
261std::pair<std::uint64_t, unsigned short>
262DebugTypeGenerator::getFieldSizeAndAlign(mlir::Type fieldTy) {
263 mlir::Type llvmTy;
264 if (auto boxTy = mlir::dyn_cast_if_present<fir::BaseBoxType>(fieldTy))
265 llvmTy = llvmTypeConverter.convertBoxTypeAsStruct(boxTy, getBoxRank(boxTy));
266 else
267 llvmTy = llvmTypeConverter.convertType(fieldTy);
268
269 uint64_t byteSize = dataLayout->getTypeSize(llvmTy);
270 unsigned short byteAlign = dataLayout->getTypeABIAlignment(llvmTy);
271 return std::pair{byteSize, byteAlign};
272}
273
274mlir::LLVM::DITypeAttr DerivedTypeCache::lookup(mlir::Type type) {
275 auto iter = typeCache.find(type);
276 if (iter != typeCache.end()) {
277 if (iter->second.first) {
278 componentActiveRecursionLevels = iter->second.second;
279 }
280 return iter->second.first;
281 }
282 return nullptr;
283}
284
285DerivedTypeCache::ActiveLevels
286DerivedTypeCache::startTranslating(mlir::Type type,
287 mlir::LLVM::DITypeAttr placeHolder) {
288 derivedTypeDepth++;
289 if (!placeHolder)
290 return {};
291 typeCache[type] = std::pair<mlir::LLVM::DITypeAttr, ActiveLevels>(
292 placeHolder, {derivedTypeDepth});
293 return {};
294}
295
/// Reset the per-component recursion state before translating the next
/// component of a derived type.
void DerivedTypeCache::preComponentVisitUpdate() {
  componentActiveRecursionLevels.clear();
}
299
/// Merge the recursion levels discovered while translating the last component
/// into the enclosing type's active levels. Both lists are kept sorted, hence
/// the std::set_union into a freshly swapped-out buffer.
void DerivedTypeCache::postComponentVisitUpdate(
    ActiveLevels &activeRecursionLevels) {
  if (componentActiveRecursionLevels.empty())
    return;
  ActiveLevels oldLevels;
  oldLevels.swap(activeRecursionLevels);
  std::set_union(componentActiveRecursionLevels.begin(),
                 componentActiveRecursionLevels.end(), oldLevels.begin(),
                 oldLevels.end(), std::back_inserter(activeRecursionLevels));
}
310
/// Commit the translation of 'ty' to 'attr' and unwind one level of the
/// derived-type translation stack, deciding how long the cached entry may
/// live based on the recursion levels still active above this type.
void DerivedTypeCache::finalize(mlir::Type ty, mlir::LLVM::DITypeAttr attr,
                                ActiveLevels &&activeRecursionLevels) {
  // If there is no nested recursion or if this type does not point to any type
  // nodes above it, it is safe to cache it indefinitely (it can be used in any
  // contexts).
  if (activeRecursionLevels.empty() ||
      (activeRecursionLevels[0] == derivedTypeDepth)) {
    typeCache[ty] = std::pair<mlir::LLVM::DITypeAttr, ActiveLevels>(attr, {});
    componentActiveRecursionLevels.clear();
    cleanUpCache(derivedTypeDepth);
    --derivedTypeDepth;
    return;
  }
  // Trim any recursion below the current type.
  if (activeRecursionLevels.back() >= derivedTypeDepth) {
    auto last = llvm::find_if(activeRecursionLevels, [&](std::int32_t depth) {
      return depth >= derivedTypeDepth;
    });
    if (last != activeRecursionLevels.end()) {
      activeRecursionLevels.erase(last, activeRecursionLevels.end());
    }
  }
  // This type still refers to nodes being translated above it: cache it, but
  // schedule invalidation once the shallowest such node finishes.
  componentActiveRecursionLevels = std::move(activeRecursionLevels);
  typeCache[ty] = std::pair<mlir::LLVM::DITypeAttr, ActiveLevels>(
      attr, componentActiveRecursionLevels);
  cleanUpCache(derivedTypeDepth);
  if (!componentActiveRecursionLevels.empty())
    insertCacheCleanUp(ty, componentActiveRecursionLevels.back());
  --derivedTypeDepth;
}
341
/// Register `type` to have its cache entry invalidated when translation pops
/// back to `depth`. The clean-up list is kept sorted by depth so cleanUpCache
/// only ever needs to examine the back entry.
void DerivedTypeCache::insertCacheCleanUp(mlir::Type type, int32_t depth) {
  // Find the first entry at or below (>=) the requested depth.
  auto iter = llvm::find_if(cacheCleanupList,
                            [&](const auto &x) { return x.second >= depth; });
  if (iter == cacheCleanupList.end()) {
    // Deepest so far: append a new entry.
    cacheCleanupList.emplace_back(
        std::pair<llvm::SmallVector<mlir::Type>, int32_t>({type}, depth));
    return;
  }
  if (iter->second == depth) {
    // An entry for this exact depth already exists; add the type to it.
    iter->first.push_back(type);
    return;
  }
  // Insert before the first deeper entry to keep the list sorted.
  cacheCleanupList.insert(
      iter, std::pair<llvm::SmallVector<mlir::Type>, int32_t>({type}, depth));
}
357
/// Invalidate cached entries that were scheduled for removal at `depth`.
void DerivedTypeCache::cleanUpCache(int32_t depth) {
  if (cacheCleanupList.empty())
    return;
  // cleanups are done in the post actions when visiting a derived type
  // tree. So if there is a clean-up for the current depth, it has to be
  // the last one (deeper ones must have been done already).
  if (cacheCleanupList.back().second == depth) {
    // Null out the attribute but keep the key; lookup() treats it as a miss.
    for (mlir::Type type : cacheCleanupList.back().first)
      typeCache[type].first = nullptr;
    cacheCleanupList.pop_back_n(1);
  }
}
370
/// Translate a fir.RecordType (Fortran derived type) into a
/// DICompositeTypeAttr structure type. Recursive member references are
/// handled through 'derivedTypeCache' and a self-referencing placeholder
/// sharing the same DistinctAttr id as the final type.
mlir::LLVM::DITypeAttr DebugTypeGenerator::convertRecordType(
    fir::RecordType Ty, mlir::LLVM::DIFileAttr fileAttr,
    mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp) {

  // Check if this type has already been converted.
  if (mlir::LLVM::DITypeAttr attr = derivedTypeCache.lookup(Ty))
    return attr;

  mlir::MLIRContext *context = module.getContext();
  auto [nameKind, sourceName] = fir::NameUniquer::deconstruct(Ty.getName());
  if (nameKind != fir::NameUniquer::NameKind::DERIVED_TYPE)
    return genPlaceholderType(context);

  llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
  // Generate a place holder TypeAttr which will be used if a member
  // references the parent type.
  auto recId = mlir::DistinctAttr::create(mlir::UnitAttr::get(context));
  auto placeHolder = mlir::LLVM::DICompositeTypeAttr::get(
      context, recId, /*isRecSelf=*/true, llvm::dwarf::DW_TAG_structure_type,
      mlir::StringAttr::get(context, ""), fileAttr, /*line=*/0, scope,
      /*baseType=*/nullptr, mlir::LLVM::DIFlags::Zero, /*sizeInBits=*/0,
      /*alignInBits=*/0, elements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
      /*allocated=*/nullptr, /*associated=*/nullptr);
  DerivedTypeCache::ActiveLevels nestedRecursions =
      derivedTypeCache.startTranslating(Ty, placeHolder);

  // The TypeInfoOp, when present, supplies the source line of the type.
  fir::TypeInfoOp tiOp = symbolTable->lookup<fir::TypeInfoOp>(Ty.getName());
  unsigned line = (tiOp) ? getLineFromLoc(tiOp.getLoc()) : 1;

  mlir::OpBuilder builder(context);
  mlir::IntegerType intTy = mlir::IntegerType::get(context, 64);
  // Running byte offset of the current member within the structure.
  std::uint64_t offset = 0;
  for (auto [fieldName, fieldTy] : Ty.getTypeList()) {
    derivedTypeCache.preComponentVisitUpdate();
    auto [byteSize, byteAlign] = getFieldSizeAndAlign(fieldTy);
    std::optional<llvm::ArrayRef<int64_t>> lowerBounds =
        fir::getComponentLowerBoundsIfNonDefault(Ty, fieldName, module,
                                                 symbolTable);
    auto seqTy = mlir::dyn_cast_if_present<fir::SequenceType>(fieldTy);

    // For members of the derived types, the information about the shift in
    // lower bounds is not part of the declOp but has to be extracted from the
    // TypeInfoOp (using getComponentLowerBoundsIfNonDefault).
    mlir::LLVM::DITypeAttr elemTy;
    if (lowerBounds && seqTy &&
        lowerBounds->size() == seqTy.getShape().size()) {
      // Array member with non-default lower bounds: build the array type
      // inline so each subrange carries the adjusted bound.
      llvm::SmallVector<mlir::LLVM::DINodeAttr> arrayElements;
      for (auto [bound, dim] :
           llvm::zip_equal(*lowerBounds, seqTy.getShape())) {
        auto countAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, dim));
        auto lowerAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, bound));
        auto subrangeTy = mlir::LLVM::DISubrangeAttr::get(
            context, countAttr, lowerAttr, /*upperBound=*/nullptr,
            /*stride=*/nullptr);
        arrayElements.push_back(subrangeTy);
      }
      elemTy = mlir::LLVM::DICompositeTypeAttr::get(
          context, llvm::dwarf::DW_TAG_array_type, /*name=*/nullptr,
          /*file=*/nullptr, /*line=*/0, /*scope=*/nullptr,
          convertType(seqTy.getEleTy(), fileAttr, scope, declOp),
          mlir::LLVM::DIFlags::Zero, /*sizeInBits=*/0, /*alignInBits=*/0,
          arrayElements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
          /*allocated=*/nullptr, /*associated=*/nullptr);
    } else
      elemTy = convertType(fieldTy, fileAttr, scope, /*declOp=*/nullptr);
    // Align the member, then emit it as a DW_TAG_member with bit offsets.
    offset = llvm::alignTo(offset, byteAlign);
    mlir::LLVM::DIDerivedTypeAttr tyAttr = mlir::LLVM::DIDerivedTypeAttr::get(
        context, llvm::dwarf::DW_TAG_member,
        mlir::StringAttr::get(context, fieldName), elemTy, byteSize * 8,
        byteAlign * 8, offset * 8, /*optional<address space>=*/std::nullopt,
        /*extra data=*/nullptr);
    elements.push_back(tyAttr);
    offset += llvm::alignTo(byteSize, byteAlign);
    derivedTypeCache.postComponentVisitUpdate(nestedRecursions);
  }

  // Build the final composite sharing 'recId' with the placeholder so that
  // self-references resolve to this type.
  auto finalAttr = mlir::LLVM::DICompositeTypeAttr::get(
      context, recId, /*isRecSelf=*/false, llvm::dwarf::DW_TAG_structure_type,
      mlir::StringAttr::get(context, sourceName.name), fileAttr, line, scope,
      /*baseType=*/nullptr, mlir::LLVM::DIFlags::Zero, offset * 8,
      /*alignInBits=*/0, elements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
      /*allocated=*/nullptr, /*associated=*/nullptr);

  derivedTypeCache.finalize(Ty, finalAttr, std::move(nestedRecursions));

  return finalAttr;
}
457
/// Translate an mlir::TupleType into an anonymous DICompositeTypeAttr
/// structure, laying members out with their natural size and alignment.
mlir::LLVM::DITypeAttr DebugTypeGenerator::convertTupleType(
    mlir::TupleType Ty, mlir::LLVM::DIFileAttr fileAttr,
    mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp) {
  // Check if this type has already been converted.
  if (mlir::LLVM::DITypeAttr attr = derivedTypeCache.lookup(Ty))
    return attr;

  DerivedTypeCache::ActiveLevels nestedRecursions =
      derivedTypeCache.startTranslating(Ty);

  llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
  mlir::MLIRContext *context = module.getContext();

  // Running byte offset of the current member within the structure.
  std::uint64_t offset = 0;
  for (auto fieldTy : Ty.getTypes()) {
    derivedTypeCache.preComponentVisitUpdate();
    auto [byteSize, byteAlign] = getFieldSizeAndAlign(fieldTy);
    mlir::LLVM::DITypeAttr elemTy =
        convertType(fieldTy, fileAttr, scope, /*declOp=*/nullptr);
    offset = llvm::alignTo(offset, byteAlign);
    // Tuple members are unnamed; only type, size and offset are emitted.
    mlir::LLVM::DIDerivedTypeAttr tyAttr = mlir::LLVM::DIDerivedTypeAttr::get(
        context, llvm::dwarf::DW_TAG_member, mlir::StringAttr::get(context, ""),
        elemTy, byteSize * 8, byteAlign * 8, offset * 8,
        /*optional<address space>=*/std::nullopt,
        /*extra data=*/nullptr);
    elements.push_back(tyAttr);
    offset += llvm::alignTo(byteSize, byteAlign);
    derivedTypeCache.postComponentVisitUpdate(nestedRecursions);
  }

  auto typeAttr = mlir::LLVM::DICompositeTypeAttr::get(
      context, llvm::dwarf::DW_TAG_structure_type,
      mlir::StringAttr::get(context, ""), fileAttr, /*line=*/0, scope,
      /*baseType=*/nullptr, mlir::LLVM::DIFlags::Zero, offset * 8,
      /*alignInBits=*/0, elements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
      /*allocated=*/nullptr, /*associated=*/nullptr);
  derivedTypeCache.finalize(Ty, typeAttr, std::move(nestedRecursions));
  return typeAttr;
}
497
/// Translate a fir.SequenceType that is NOT behind a descriptor into a
/// DICompositeTypeAttr array type, with one DISubrangeAttr per dimension.
/// Constant bounds/extents become integer attributes; non-constant ones are
/// represented by artificial debug variables.
mlir::LLVM::DITypeAttr DebugTypeGenerator::convertSequenceType(
    fir::SequenceType seqTy, mlir::LLVM::DIFileAttr fileAttr,
    mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp) {
  mlir::MLIRContext *context = module.getContext();

  llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
  mlir::LLVM::DITypeAttr elemTy =
      convertType(seqTy.getEleTy(), fileAttr, scope, declOp);

  unsigned index = 0;
  auto intTy = mlir::IntegerType::get(context, 64);
  for (fir::SequenceType::Extent dim : seqTy.getShape()) {
    mlir::Attribute lowerAttr = nullptr;
    mlir::Attribute countAttr = nullptr;
    // If declOp is present, we use the shift in it to get the lower bound of
    // the array. If it is constant, that is used. If it is not constant, we
    // create a variable that represents its location and use that as lower
    // bound. As an optimization, we don't create a lower bound when shift is a
    // constant 1 as that is the default.
    if (declOp && declOp.getShift().size() > index) {
      if (std::optional<std::int64_t> optint =
              getIntIfConstant(declOp.getShift()[index])) {
        if (*optint != 1)
          lowerAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, *optint));
      } else
        lowerAttr = generateArtificialVariable(
            context, declOp.getShift()[index], fileAttr, scope, declOp);
    }

    if (dim == seqTy.getUnknownExtent()) {
      // This path is taken for both assumed size array or when the size of the
      // array is variable. In the case of variable size, we create a variable
      // to use as countAttr. Note that fir has a constant size of -1 for
      // assumed size array. So !optint check makes sure we don't generate
      // variable in that case.
      if (declOp && declOp.getShape().size() > index) {
        std::optional<std::int64_t> optint =
            getIntIfConstant(declOp.getShape()[index]);
        if (!optint)
          countAttr = generateArtificialVariable(
              context, declOp.getShape()[index], fileAttr, scope, declOp);
      }
    } else
      countAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, dim));

    auto subrangeTy = mlir::LLVM::DISubrangeAttr::get(
        context, countAttr, lowerAttr, /*upperBound=*/nullptr,
        /*stride=*/nullptr);
    elements.push_back(subrangeTy);
    ++index;
  }
  // Apart from arrays, the `DICompositeTypeAttr` is used for other things like
  // structure types. Many of its fields which are not applicable to arrays
  // have been set to some valid default values.

  return mlir::LLVM::DICompositeTypeAttr::get(
      context, llvm::dwarf::DW_TAG_array_type, /*name=*/nullptr,
      /*file=*/nullptr, /*line=*/0, /*scope=*/nullptr, elemTy,
      mlir::LLVM::DIFlags::Zero, /*sizeInBits=*/0, /*alignInBits=*/0, elements,
      /*dataLocation=*/nullptr, /*rank=*/nullptr, /*allocated=*/nullptr,
      /*associated=*/nullptr);
}
560
/// Translate a fir.VectorType into a DICompositeTypeAttr carrying the
/// DIFlags::Vector flag, named like "vector real (4)".
mlir::LLVM::DITypeAttr DebugTypeGenerator::convertVectorType(
    fir::VectorType vecTy, mlir::LLVM::DIFileAttr fileAttr,
    mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp) {
  mlir::MLIRContext *context = module.getContext();

  llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
  mlir::LLVM::DITypeAttr elemTy =
      convertType(vecTy.getEleTy(), fileAttr, scope, declOp);
  auto intTy = mlir::IntegerType::get(context, 64);
  auto countAttr =
      mlir::IntegerAttr::get(intTy, llvm::APInt(64, vecTy.getLen()));
  auto subrangeTy = mlir::LLVM::DISubrangeAttr::get(
      context, countAttr, /*lowerBound=*/nullptr, /*upperBound=*/nullptr,
      /*stride=*/nullptr);
  elements.push_back(subrangeTy);
  // Total vector size = element size (per the data layout) x length, in bits.
  mlir::Type llvmTy = llvmTypeConverter.convertType(vecTy.getEleTy());
  uint64_t sizeInBits = dataLayout->getTypeSize(llvmTy) * vecTy.getLen() * 8;
  std::string name("vector");
  // The element type of the vector must be integer or real so it will be a
  // DIBasicTypeAttr.
  if (auto ty = mlir::dyn_cast_if_present<mlir::LLVM::DIBasicTypeAttr>(elemTy))
    name += " " + ty.getName().str();

  name += " (" + std::to_string(vecTy.getLen()) + ")";
  return mlir::LLVM::DICompositeTypeAttr::get(
      context, llvm::dwarf::DW_TAG_array_type,
      mlir::StringAttr::get(context, name),
      /*file=*/nullptr, /*line=*/0, /*scope=*/nullptr, elemTy,
      mlir::LLVM::DIFlags::Vector, sizeInBits, /*alignInBits=*/0, elements,
      /*dataLocation=*/nullptr, /*rank=*/nullptr, /*allocated=*/nullptr,
      /*associated=*/nullptr);
}
593
/// Translate a fir.CharacterType into a DIStringTypeAttr. The string length
/// is either a compile-time constant, read out of a descriptor (when
/// 'hasDescriptor' is set), or provided by an artificial debug variable for
/// assumed-length dummies.
mlir::LLVM::DITypeAttr DebugTypeGenerator::convertCharacterType(
    fir::CharacterType charTy, mlir::LLVM::DIFileAttr fileAttr,
    mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp,
    bool hasDescriptor) {
  mlir::MLIRContext *context = module.getContext();

  // DWARF 5 says the following about the character encoding in 5.1.1.2.
  // "DW_ATE_ASCII and DW_ATE_UCS specify encodings for the Fortran 2003
  // string kinds ASCII (ISO/IEC 646:1991) and ISO_10646 (UCS-4 in ISO/IEC
  // 10646:2000)."
  unsigned encoding = llvm::dwarf::DW_ATE_ASCII;
  if (charTy.getFKind() != 1)
    encoding = llvm::dwarf::DW_ATE_UCS;

  uint64_t sizeInBits = 0;
  mlir::LLVM::DIExpressionAttr lenExpr = nullptr;
  mlir::LLVM::DIExpressionAttr locExpr = nullptr;
  mlir::LLVM::DIVariableAttr varAttr = nullptr;

  if (hasDescriptor) {
    // Both length and data location come from the descriptor:
    // length at base_addr + lenOffset, data at *base_addr.
    llvm::SmallVector<mlir::LLVM::DIExpressionElemAttr> ops;
    auto addOp = [&](unsigned opc, llvm::ArrayRef<uint64_t> vals) {
      ops.push_back(mlir::LLVM::DIExpressionElemAttr::get(context, opc, vals));
    };
    addOp(llvm::dwarf::DW_OP_push_object_address, {});
    addOp(llvm::dwarf::DW_OP_plus_uconst, {lenOffset});
    lenExpr = mlir::LLVM::DIExpressionAttr::get(context, ops);
    ops.clear();

    addOp(llvm::dwarf::DW_OP_push_object_address, {});
    addOp(llvm::dwarf::DW_OP_deref, {});
    locExpr = mlir::LLVM::DIExpressionAttr::get(context, ops);
  } else if (charTy.hasConstantLen()) {
    sizeInBits =
        charTy.getLen() * kindMapping.getCharacterBitsize(charTy.getFKind());
  } else {
    // In assumed length string, the len of the character is not part of the
    // type but can be found at the runtime. Here we create an artificial
    // variable that will contain that length. This variable is used as
    // 'stringLength' in DIStringTypeAttr.
    if (declOp && !declOp.getTypeparams().empty()) {
      mlir::LLVM::DILocalVariableAttr lvAttr = generateArtificialVariable(
          context, declOp.getTypeparams()[0], fileAttr, scope, declOp);
      varAttr = mlir::cast<mlir::LLVM::DIVariableAttr>(lvAttr);
    }
  }

  // FIXME: Currently the DIStringType in llvm does not have the option to set
  // type of the underlying character. This restricts our ability to represent
  // string with non-default characters. Please see issue #95440 for more
  // details.
  return mlir::LLVM::DIStringTypeAttr::get(
      context, llvm::dwarf::DW_TAG_string_type,
      mlir::StringAttr::get(context, ""), sizeInBits, /*alignInBits=*/0,
      /*stringLength=*/varAttr, lenExpr, locExpr, encoding);
}
650
/// Translate the target type of a pointer-like entity (reference, heap,
/// pointer). Boxed arrays and characters get descriptor-aware handling; all
/// other element types become a plain DW_TAG_pointer_type.
mlir::LLVM::DITypeAttr DebugTypeGenerator::convertPointerLikeType(
    mlir::Type elTy, mlir::LLVM::DIFileAttr fileAttr,
    mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp,
    bool genAllocated, bool genAssociated) {
  mlir::MLIRContext *context = module.getContext();

  // Arrays and character need different treatment because DWARF have special
  // constructs for them to get the location from the descriptor. Rest of
  // types are handled like pointer to underlying type.
  if (auto seqTy = mlir::dyn_cast_if_present<fir::SequenceType>(elTy))
    return convertBoxedSequenceType(seqTy, fileAttr, scope, declOp,
                                    genAllocated, genAssociated);
  if (auto charTy = mlir::dyn_cast_if_present<fir::CharacterType>(elTy))
    return convertCharacterType(charTy, fileAttr, scope, declOp,
                                /*hasDescriptor=*/true);

  // If elTy is null or none then generate a void*
  mlir::LLVM::DITypeAttr elTyAttr;
  if (!elTy || mlir::isa<mlir::NoneType>(elTy))
    elTyAttr = mlir::LLVM::DINullTypeAttr::get(context);
  else
    elTyAttr = convertType(elTy, fileAttr, scope, declOp);

  return mlir::LLVM::DIDerivedTypeAttr::get(
      context, llvm::dwarf::DW_TAG_pointer_type,
      mlir::StringAttr::get(context, ""), elTyAttr, /*sizeInBits=*/ptrSize * 8,
      /*alignInBits=*/0, /*offset=*/0,
      /*optional<address space>=*/std::nullopt, /*extra data=*/nullptr);
}
680
/// Main entry point: translate an MLIR/FIR type into the corresponding
/// DITypeAttr, dispatching to the specialized converters above. Unhandled
/// types fall through to a placeholder.
mlir::LLVM::DITypeAttr
DebugTypeGenerator::convertType(mlir::Type Ty, mlir::LLVM::DIFileAttr fileAttr,
                                mlir::LLVM::DIScopeAttr scope,
                                fir::cg::XDeclareOp declOp) {
  mlir::MLIRContext *context = module.getContext();
  if (Ty.isInteger()) {
    return genBasicType(context, mlir::StringAttr::get(context, "integer"),
                        Ty.getIntOrFloatBitWidth(), llvm::dwarf::DW_ATE_signed);
  } else if (mlir::isa<mlir::FloatType>(Ty)) {
    return genBasicType(context, mlir::StringAttr::get(context, "real"),
                        Ty.getIntOrFloatBitWidth(), llvm::dwarf::DW_ATE_float);
  } else if (auto logTy = mlir::dyn_cast_if_present<fir::LogicalType>(Ty)) {
    return genBasicType(context,
                        mlir::StringAttr::get(context, logTy.getMnemonic()),
                        kindMapping.getLogicalBitsize(logTy.getFKind()),
                        llvm::dwarf::DW_ATE_boolean);
  } else if (auto cplxTy = mlir::dyn_cast_if_present<mlir::ComplexType>(Ty)) {
    // Complex is a single base type twice the width of its float component.
    auto floatTy = mlir::cast<mlir::FloatType>(cplxTy.getElementType());
    unsigned bitWidth = floatTy.getWidth();
    return genBasicType(context, mlir::StringAttr::get(context, "complex"),
                        bitWidth * 2, llvm::dwarf::DW_ATE_complex_float);
  } else if (auto seqTy = mlir::dyn_cast_if_present<fir::SequenceType>(Ty)) {
    return convertSequenceType(seqTy, fileAttr, scope, declOp);
  } else if (auto charTy = mlir::dyn_cast_if_present<fir::CharacterType>(Ty)) {
    // Bare character type: no descriptor available.
    return convertCharacterType(charTy, fileAttr, scope, declOp,
                                /*hasDescriptor=*/false);
  } else if (auto recTy = mlir::dyn_cast_if_present<fir::RecordType>(Ty)) {
    return convertRecordType(recTy, fileAttr, scope, declOp);
  } else if (auto tupleTy = mlir::dyn_cast_if_present<mlir::TupleType>(Ty)) {
    return convertTupleType(tupleTy, fileAttr, scope, declOp);
  } else if (auto refTy = mlir::dyn_cast_if_present<fir::ReferenceType>(Ty)) {
    auto elTy = refTy.getEleTy();
    return convertPointerLikeType(elTy, fileAttr, scope, declOp,
                                  /*genAllocated=*/false,
                                  /*genAssociated=*/false);
  } else if (auto vecTy = mlir::dyn_cast_if_present<fir::VectorType>(Ty)) {
    return convertVectorType(vecTy, fileAttr, scope, declOp);
  } else if (mlir::isa<mlir::IndexType>(Ty)) {
    // Index uses the type converter's configured index bitwidth.
    return genBasicType(context, mlir::StringAttr::get(context, "integer"),
                        llvmTypeConverter.getIndexTypeBitwidth(),
                        llvm::dwarf::DW_ATE_signed);
  } else if (auto boxTy = mlir::dyn_cast_if_present<fir::BaseBoxType>(Ty)) {
    // Boxed entities: arrays, allocatables (HeapType) and Fortran pointers
    // (PointerType) each get descriptor-aware treatment.
    auto elTy = boxTy.getEleTy();
    if (auto seqTy = mlir::dyn_cast_if_present<fir::SequenceType>(elTy))
      return convertBoxedSequenceType(seqTy, fileAttr, scope, declOp, false,
                                      false);
    if (auto heapTy = mlir::dyn_cast_if_present<fir::HeapType>(elTy))
      return convertPointerLikeType(heapTy.getElementType(), fileAttr, scope,
                                    declOp, /*genAllocated=*/true,
                                    /*genAssociated=*/false);
    if (auto ptrTy = mlir::dyn_cast_if_present<fir::PointerType>(elTy))
      return convertPointerLikeType(ptrTy.getElementType(), fileAttr, scope,
                                    declOp, /*genAllocated=*/false,
                                    /*genAssociated=*/true);
    return convertPointerLikeType(elTy, fileAttr, scope, declOp,
                                  /*genAllocated=*/false,
                                  /*genAssociated=*/false);
  } else {
    // FIXME: These types are currently unhandled. We are generating a
    // placeholder type to allow us to test supported bits.
    return genPlaceholderType(context);
  }
}
744
745} // namespace fir
746

// Source: flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp