1//===- LoopAnalysis.cpp - Misc loop analysis routines //-------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements miscellaneous loop analysis routines.
10//
11//===----------------------------------------------------------------------===//
12
13#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
14
15#include "mlir/Analysis/SliceAnalysis.h"
16#include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
17#include "mlir/Dialect/Affine/Analysis/AffineStructures.h"
18#include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
19#include "mlir/Dialect/Affine/Analysis/Utils.h"
20#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
21#include "llvm/Support/MathExtras.h"
22
23#include "llvm/Support/Debug.h"
24#include <numeric>
25#include <optional>
26
27#define DEBUG_TYPE "affine-loop-analysis"
28
29using namespace mlir;
30using namespace mlir::affine;
31
namespace {

/// A directed graph to model relationships between MLIR Operations.
///
/// Nodes are stored by value in a vector and looked up by a linear scan, so
/// this is intended for the small graphs built by the analyses in this file.
class DirectedOpGraph {
public:
  /// Add a node to the graph.
  void addNode(Operation *op) {
    assert(!hasNode(op) && "node already added");
    nodes.emplace_back(Args&: op);
    // Start the new node off with an empty adjacency list.
    edges[op] = {};
  }

  /// Add an edge from `src` to `dest`.
  void addEdge(Operation *src, Operation *dest) {
    // This is a multi-graph.
    assert(hasNode(src) && "src node does not exist in graph");
    assert(hasNode(dest) && "dest node does not exist in graph");
    edges[src].push_back(Elt: getNode(op: dest));
  }

  /// Returns true if there is a (directed) cycle in the graph.
  bool hasCycle() { return dfs(/*cycleCheck=*/true); }

  /// Debugging aid: dump every node along with its adjacency list.
  void printEdges() {
    for (auto &en : edges) {
      llvm::dbgs() << *en.first << " (" << en.first << ")"
                   << " has " << en.second.size() << " edges:\n";
      for (auto *node : en.second) {
        llvm::dbgs() << '\t' << *node->op << '\n';
      }
    }
  }

private:
  /// A node of a directed graph between MLIR Operations to model various
  /// relationships. This is meant to be used internally.
  struct DGNode {
    DGNode(Operation *op) : op(op) {};
    Operation *op;

    // Start and finish visit numbers are standard in DFS to implement things
    // like finding strongly connected components. These numbers are modified
    // during analyses on the graph and so seemingly const API methods will be
    // non-const.

    /// Start visit number.
    int vn = -1;

    /// Finish visit number.
    int fn = -1;
  };

  /// Get internal node corresponding to `op`.
  DGNode *getNode(Operation *op) {
    // Linear scan over the node storage.
    auto *value =
        llvm::find_if(Range&: nodes, P: [&](const DGNode &node) { return node.op == op; });
    assert(value != nodes.end() && "node doesn't exist in graph");
    return &*value;
  }

  /// Returns true if `key` is in the graph.
  bool hasNode(Operation *key) const {
    return llvm::find_if(Range: nodes, P: [&](const DGNode &node) {
      return node.op == key;
    }) != nodes.end();
  }

  /// Perform a depth-first traversal of the graph setting visited and finished
  /// numbers. If `cycleCheck` is set, detects cycles and returns true as soon
  /// as the first cycle is detected, and false if there are no cycles. If
  /// `cycleCheck` is not set, completes the DFS and the `return` value doesn't
  /// have a meaning.
  bool dfs(bool cycleCheck = false) {
    // Reset visit state: vn == 0 marks "not yet visited" below; fn == -1 marks
    // "visit not finished".
    for (DGNode &node : nodes) {
      node.vn = 0;
      node.fn = -1;
    }

    unsigned time = 0;
    for (DGNode &node : nodes) {
      if (node.vn == 0) {
        bool ret = dfsNode(node, cycleCheck, time);
        // Check if a cycle was already found.
        if (cycleCheck && ret)
          return true;
      } else if (cycleCheck && node.fn == -1) {
        // We have encountered a node whose visit has started but it's not
        // finished. So we have a cycle.
        return true;
      }
    }
    return false;
  }

  /// Perform depth-first traversal starting at `node`. Return true
  /// as soon as a cycle is found if `cycleCheck` was set. Update `time`.
  bool dfsNode(DGNode &node, bool cycleCheck, unsigned &time) const {
    auto nodeEdges = edges.find(Val: node.op);
    assert(nodeEdges != edges.end() && "missing node in graph");
    node.vn = ++time;

    for (auto &neighbour : nodeEdges->second) {
      if (neighbour->vn == 0) {
        // Unvisited neighbour: recurse into it.
        bool ret = dfsNode(node&: *neighbour, cycleCheck, time);
        if (cycleCheck && ret)
          return true;
      } else if (cycleCheck && neighbour->fn == -1) {
        // We have encountered a node whose visit has started but it's not
        // finished. So we have a cycle.
        return true;
      }
    }

    // Update finish time.
    node.fn = ++time;

    return false;
  }

  // The list of nodes. The storage is owned by this class.
  SmallVector<DGNode> nodes;

  // Edges as an adjacency list.
  DenseMap<Operation *, SmallVector<DGNode *>> edges;
};

} // namespace
159
160/// Returns the trip count of the loop as an affine expression if the latter is
161/// expressible as an affine expression, and nullptr otherwise. The trip count
162/// expression is simplified before returning. This method only utilizes map
163/// composition to construct lower and upper bounds before computing the trip
164/// count expressions.
165void mlir::affine::getTripCountMapAndOperands(
166 AffineForOp forOp, AffineMap *tripCountMap,
167 SmallVectorImpl<Value> *tripCountOperands) {
168 MLIRContext *context = forOp.getContext();
169 int64_t step = forOp.getStepAsInt();
170 int64_t loopSpan;
171 if (forOp.hasConstantBounds()) {
172 int64_t lb = forOp.getConstantLowerBound();
173 int64_t ub = forOp.getConstantUpperBound();
174 loopSpan = ub - lb;
175 if (loopSpan < 0)
176 loopSpan = 0;
177 *tripCountMap = AffineMap::getConstantMap(
178 val: llvm::divideCeilSigned(Numerator: loopSpan, Denominator: step), context);
179 tripCountOperands->clear();
180 return;
181 }
182 auto lbMap = forOp.getLowerBoundMap();
183 auto ubMap = forOp.getUpperBoundMap();
184 if (lbMap.getNumResults() != 1) {
185 *tripCountMap = AffineMap();
186 return;
187 }
188
189 // Difference of each upper bound expression from the single lower bound
190 // expression (divided by the step) provides the expressions for the trip
191 // count map.
192 AffineValueMap ubValueMap(ubMap, forOp.getUpperBoundOperands());
193
194 SmallVector<AffineExpr, 4> lbSplatExpr(ubValueMap.getNumResults(),
195 lbMap.getResult(idx: 0));
196 auto lbMapSplat = AffineMap::get(dimCount: lbMap.getNumDims(), symbolCount: lbMap.getNumSymbols(),
197 results: lbSplatExpr, context);
198 AffineValueMap lbSplatValueMap(lbMapSplat, forOp.getLowerBoundOperands());
199
200 AffineValueMap tripCountValueMap;
201 AffineValueMap::difference(a: ubValueMap, b: lbSplatValueMap, res: &tripCountValueMap);
202 for (unsigned i = 0, e = tripCountValueMap.getNumResults(); i < e; ++i)
203 tripCountValueMap.setResult(i,
204 e: tripCountValueMap.getResult(i).ceilDiv(v: step));
205
206 *tripCountMap = tripCountValueMap.getAffineMap();
207 tripCountOperands->assign(in_start: tripCountValueMap.getOperands().begin(),
208 in_end: tripCountValueMap.getOperands().end());
209}
210
211/// Returns the trip count of the loop if it's a constant, std::nullopt
212/// otherwise. This method uses affine expression analysis (in turn using
213/// getTripCount) and is able to determine constant trip count in non-trivial
214/// cases.
215std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp) {
216 SmallVector<Value, 4> operands;
217 AffineMap map;
218 getTripCountMapAndOperands(forOp, tripCountMap: &map, tripCountOperands: &operands);
219
220 if (!map)
221 return std::nullopt;
222
223 // Take the min if all trip counts are constant.
224 std::optional<uint64_t> tripCount;
225 for (auto resultExpr : map.getResults()) {
226 if (auto constExpr = dyn_cast<AffineConstantExpr>(Val&: resultExpr)) {
227 if (tripCount.has_value())
228 tripCount =
229 std::min(a: *tripCount, b: static_cast<uint64_t>(constExpr.getValue()));
230 else
231 tripCount = constExpr.getValue();
232 } else {
233 return std::nullopt;
234 }
235 }
236 return tripCount;
237}
238
239/// Returns the greatest known integral divisor of the trip count. Affine
240/// expression analysis is used (indirectly through getTripCount), and
241/// this method is thus able to determine non-trivial divisors.
242uint64_t mlir::affine::getLargestDivisorOfTripCount(AffineForOp forOp) {
243 SmallVector<Value, 4> operands;
244 AffineMap map;
245 getTripCountMapAndOperands(forOp, tripCountMap: &map, tripCountOperands: &operands);
246
247 if (!map)
248 return 1;
249
250 // The largest divisor of the trip count is the GCD of the individual largest
251 // divisors.
252 assert(map.getNumResults() >= 1 && "expected one or more results");
253 std::optional<uint64_t> gcd;
254 for (auto resultExpr : map.getResults()) {
255 uint64_t thisGcd;
256 if (auto constExpr = dyn_cast<AffineConstantExpr>(Val&: resultExpr)) {
257 uint64_t tripCount = constExpr.getValue();
258 // 0 iteration loops (greatest divisor is 2^64 - 1).
259 if (tripCount == 0)
260 thisGcd = std::numeric_limits<uint64_t>::max();
261 else
262 // The greatest divisor is the trip count.
263 thisGcd = tripCount;
264 } else {
265 // Trip count is not a known constant; return its largest known divisor.
266 thisGcd = resultExpr.getLargestKnownDivisor();
267 }
268 if (gcd.has_value())
269 gcd = std::gcd(m: *gcd, n: thisGcd);
270 else
271 gcd = thisGcd;
272 }
273 assert(gcd.has_value() && "value expected per above logic");
274 return *gcd;
275}
276
277/// Given an affine.for `iv` and an access `index` of type index, returns `true`
278/// if `index` is independent of `iv` and false otherwise.
279///
280/// Prerequisites: `iv` and `index` of the proper type;
281static bool isAccessIndexInvariant(Value iv, Value index) {
282 assert(isAffineForInductionVar(iv) && "iv must be an affine.for iv");
283 assert(isa<IndexType>(index.getType()) && "index must be of 'index' type");
284 auto map = AffineMap::getMultiDimIdentityMap(/*numDims=*/1, context: iv.getContext());
285 SmallVector<Value> operands = {index};
286 AffineValueMap avm(map, operands);
287 avm.composeSimplifyAndCanonicalize();
288 return !avm.isFunctionOf(idx: 0, value: iv);
289}
290
291// Pre-requisite: Loop bounds should be in canonical form.
292template <typename LoadOrStoreOp>
293bool mlir::affine::isInvariantAccess(LoadOrStoreOp memOp, AffineForOp forOp) {
294 AffineValueMap avm(memOp.getAffineMap(), memOp.getMapOperands());
295 avm.composeSimplifyAndCanonicalize();
296 return !llvm::is_contained(Range: avm.getOperands(), Element: forOp.getInductionVar());
297}
298
299// Explicitly instantiate the template so that the compiler knows we need them.
300template bool mlir::affine::isInvariantAccess(AffineReadOpInterface,
301 AffineForOp);
302template bool mlir::affine::isInvariantAccess(AffineWriteOpInterface,
303 AffineForOp);
304template bool mlir::affine::isInvariantAccess(AffineLoadOp, AffineForOp);
305template bool mlir::affine::isInvariantAccess(AffineStoreOp, AffineForOp);
306
307DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
308 ArrayRef<Value> indices) {
309 DenseSet<Value> res;
310 for (Value index : indices) {
311 if (isAccessIndexInvariant(iv, index))
312 res.insert(V: index);
313 }
314 return res;
315}
316
// TODO: check access stride.
/// Returns false if two or more subscripts of `memoryOp`'s access vary with
/// `iv` (so the access cannot be treated as contiguous along `iv`), and true
/// otherwise. On success, `*memRefDim` is set to -1 when no subscript varies
/// with `iv`, and otherwise to the varying dimension counted from the
/// innermost dimension, i.e., rank - 1 - <position of the varying subscript>.
/// Also returns false, after emitting an error, for memrefs with non-identity
/// layout maps (not yet implemented).
template <typename LoadOrStoreOp>
bool mlir::affine::isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
                                      int *memRefDim) {
  static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
                                AffineWriteOpInterface>::value,
                "Must be called on either an affine read or write op");
  assert(memRefDim && "memRefDim == nullptr");
  auto memRefType = memoryOp.getMemRefType();

  // Non-identity layout maps are not supported yet.
  if (!memRefType.getLayout().isIdentity())
    return memoryOp.emitError("NYI: non-trivial layout map"), false;

  int uniqueVaryingIndexAlongIv = -1;
  auto accessMap = memoryOp.getAffineMap();
  SmallVector<Value, 4> mapOperands(memoryOp.getMapOperands());
  unsigned numDims = accessMap.getNumDims();
  for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
    // Gather map operands used in result expr 'i' in 'exprOperands'.
    SmallVector<Value, 4> exprOperands;
    auto resultExpr = accessMap.getResult(i);
    resultExpr.walk([&](AffineExpr expr) {
      if (auto dimExpr = dyn_cast<AffineDimExpr>(Val&: expr))
        exprOperands.push_back(Elt: mapOperands[dimExpr.getPosition()]);
      else if (auto symExpr = dyn_cast<AffineSymbolExpr>(Val&: expr))
        exprOperands.push_back(Elt: mapOperands[numDims + symExpr.getPosition()]);
    });
    // Check access invariance of each operand in 'exprOperands'.
    for (Value exprOperand : exprOperands) {
      if (!isAccessIndexInvariant(iv, index: exprOperand)) {
        if (uniqueVaryingIndexAlongIv != -1) {
          // 2+ varying indices -> do not vectorize along iv.
          return false;
        }
        uniqueVaryingIndexAlongIv = i;
      }
    }
  }

  // Report the varying dimension relative to the innermost one, or -1 when
  // the access is fully invariant w.r.t. `iv`.
  if (uniqueVaryingIndexAlongIv == -1)
    *memRefDim = -1;
  else
    *memRefDim = memRefType.getRank() - (uniqueVaryingIndexAlongIv + 1);
  return true;
}

template bool mlir::affine::isContiguousAccess(Value iv,
                                               AffineReadOpInterface loadOp,
                                               int *memRefDim);
template bool mlir::affine::isContiguousAccess(Value iv,
                                               AffineWriteOpInterface loadOp,
                                               int *memRefDim);
369
370template <typename LoadOrStoreOp>
371static bool isVectorElement(LoadOrStoreOp memoryOp) {
372 auto memRefType = memoryOp.getMemRefType();
373 return isa<VectorType>(memRefType.getElementType());
374}
375
/// Callback type for an extra caller-provided vectorizability condition that
/// is checked on each load/store op found in the loop body.
using VectorizableOpFun = std::function<bool(AffineForOp, Operation &)>;

/// Returns true if the body of `loop` is vectorizable. The nest must contain
/// no affine.if ops, no ops with non-vectorizable operand/result element
/// types, no ops with regions other than affine.if/affine.for, nothing
/// matched by `vectorTransferMatcher`, and no load/store of vector element
/// types; in addition, every load/store must satisfy `isVectorizableOp` when
/// one is provided.
static bool
isVectorizableLoopBodyWithOpCond(AffineForOp loop,
                                 const VectorizableOpFun &isVectorizableOp,
                                 NestedPattern &vectorTransferMatcher) {
  auto *forOp = loop.getOperation();

  // No vectorization across conditionals for now.
  auto conditionals = matcher::If();
  SmallVector<NestedMatch, 8> conditionalsMatched;
  conditionals.match(op: forOp, matches: &conditionalsMatched);
  if (!conditionalsMatched.empty()) {
    return false;
  }

  // No vectorization for ops with operand or result types that are not
  // vectorizable.
  auto types = matcher::Op(filter: [](Operation &op) -> bool {
    if (llvm::any_of(Range: op.getOperandTypes(), P: [](Type type) {
          // For memref operands, it's the element type that must be a valid
          // vector element type.
          if (MemRefType t = dyn_cast<MemRefType>(Val&: type))
            return !VectorType::isValidElementType(t: t.getElementType());
          return !VectorType::isValidElementType(t: type);
        }))
      return true;
    return !llvm::all_of(Range: op.getResultTypes(), P: VectorType::isValidElementType);
  });
  SmallVector<NestedMatch, 8> opsMatched;
  types.match(op: forOp, matches: &opsMatched);
  if (!opsMatched.empty()) {
    return false;
  }

  // No vectorization across unknown regions.
  auto regions = matcher::Op(filter: [](Operation &op) -> bool {
    return op.getNumRegions() != 0 && !isa<AffineIfOp, AffineForOp>(Val: op);
  });
  SmallVector<NestedMatch, 8> regionsMatched;
  regions.match(op: forOp, matches: &regionsMatched);
  if (!regionsMatched.empty()) {
    return false;
  }

  // Bail out if the caller's transfer matcher finds anything.
  SmallVector<NestedMatch, 8> vectorTransfersMatched;
  vectorTransferMatcher.match(op: forOp, matches: &vectorTransfersMatched);
  if (!vectorTransfersMatched.empty()) {
    return false;
  }

  auto loadAndStores = matcher::Op(filter: matcher::isLoadOrStore);
  SmallVector<NestedMatch, 8> loadAndStoresMatched;
  loadAndStores.match(op: forOp, matches: &loadAndStoresMatched);
  for (auto ls : loadAndStoresMatched) {
    auto *op = ls.getMatchedOperation();
    auto load = dyn_cast<AffineLoadOp>(Val: op);
    auto store = dyn_cast<AffineStoreOp>(Val: op);
    // Only scalar types are considered vectorizable, all load/store must be
    // vectorizable for a loop to qualify as vectorizable.
    // TODO: ponder whether we want to be more general here.
    bool vector = load ? isVectorElement(memoryOp: load) : isVectorElement(memoryOp: store);
    if (vector) {
      return false;
    }
    if (isVectorizableOp && !isVectorizableOp(loop, *op)) {
      return false;
    }
  }
  return true;
}
445
446bool mlir::affine::isVectorizableLoopBody(
447 AffineForOp loop, int *memRefDim, NestedPattern &vectorTransferMatcher) {
448 *memRefDim = -1;
449 VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
450 auto load = dyn_cast<AffineLoadOp>(Val&: op);
451 auto store = dyn_cast<AffineStoreOp>(Val&: op);
452 int thisOpMemRefDim = -1;
453 bool isContiguous =
454 load ? isContiguousAccess(iv: loop.getInductionVar(),
455 memoryOp: cast<AffineReadOpInterface>(Val&: *load),
456 memRefDim: &thisOpMemRefDim)
457 : isContiguousAccess(iv: loop.getInductionVar(),
458 memoryOp: cast<AffineWriteOpInterface>(Val&: *store),
459 memRefDim: &thisOpMemRefDim);
460 if (thisOpMemRefDim != -1) {
461 // If memory accesses vary across different dimensions then the loop is
462 // not vectorizable.
463 if (*memRefDim != -1 && *memRefDim != thisOpMemRefDim)
464 return false;
465 *memRefDim = thisOpMemRefDim;
466 }
467 return isContiguous;
468 });
469 return isVectorizableLoopBodyWithOpCond(loop, isVectorizableOp: fun, vectorTransferMatcher);
470}
471
472bool mlir::affine::isVectorizableLoopBody(
473 AffineForOp loop, NestedPattern &vectorTransferMatcher) {
474 return isVectorizableLoopBodyWithOpCond(loop, isVectorizableOp: nullptr, vectorTransferMatcher);
475}
476
477/// Checks whether SSA dominance would be violated if a for op's body
478/// operations are shifted by the specified shifts. This method checks if a
479/// 'def' and all its uses have the same shift factor.
480// TODO: extend this to check for memory-based dependence violation when we have
481// the support.
482bool mlir::affine::isOpwiseShiftValid(AffineForOp forOp,
483 ArrayRef<uint64_t> shifts) {
484 auto *forBody = forOp.getBody();
485 assert(shifts.size() == forBody->getOperations().size());
486
487 // Work backwards over the body of the block so that the shift of a use's
488 // ancestor operation in the block gets recorded before it's looked up.
489 DenseMap<Operation *, uint64_t> forBodyShift;
490 for (const auto &it :
491 llvm::enumerate(First: llvm::reverse(C&: forBody->getOperations()))) {
492 auto &op = it.value();
493
494 // Get the index of the current operation, note that we are iterating in
495 // reverse so we need to fix it up.
496 size_t index = shifts.size() - it.index() - 1;
497
498 // Remember the shift of this operation.
499 uint64_t shift = shifts[index];
500 forBodyShift.try_emplace(Key: &op, Args&: shift);
501
502 // Validate the results of this operation if it were to be shifted.
503 for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
504 Value result = op.getResult(idx: i);
505 for (auto *user : result.getUsers()) {
506 // If an ancestor operation doesn't lie in the block of forOp,
507 // there is no shift to check.
508 if (auto *ancOp = forBody->findAncestorOpInBlock(op&: *user)) {
509 assert(forBodyShift.count(ancOp) > 0 && "ancestor expected in map");
510 if (shift != forBodyShift[ancOp])
511 return false;
512 }
513 }
514 }
515 }
516 return true;
517}
518
/// Returns true if the default hyper-rectangular tiling of the band `loops`
/// is legal, i.e., no dependence between the memory accesses collected under
/// loops[0] has a component with an entirely negative direction at any
/// relevant depth.
bool mlir::affine::isTilingValid(ArrayRef<AffineForOp> loops) {
  assert(!loops.empty() && "no original loops provided");

  // We first find out all dependences we intend to check.
  SmallVector<Operation *, 8> loadAndStoreOps;
  loops[0]->walk(callback: [&](Operation *op) {
    if (isa<AffineReadOpInterface, AffineWriteOpInterface>(Val: op))
      loadAndStoreOps.push_back(Elt: op);
  });

  unsigned numOps = loadAndStoreOps.size();
  unsigned numLoops = loops.size();
  // Check every ordered pair of accesses at each depth from 1 up to one past
  // the band size.
  for (unsigned d = 1; d <= numLoops + 1; ++d) {
    for (unsigned i = 0; i < numOps; ++i) {
      Operation *srcOp = loadAndStoreOps[i];
      MemRefAccess srcAccess(srcOp);
      for (unsigned j = 0; j < numOps; ++j) {
        Operation *dstOp = loadAndStoreOps[j];
        MemRefAccess dstAccess(dstOp);

        SmallVector<DependenceComponent, 2> depComps;
        DependenceResult result = checkMemrefAccessDependence(
            srcAccess, dstAccess, loopDepth: d, /*dependenceConstraints=*/nullptr,
            dependenceComponents: &depComps);

        // Skip if there is no dependence in this case.
        if (!hasDependence(result))
          continue;

        // Check whether there is any negative direction vector in the
        // dependence components found above, which means that dependence is
        // violated by the default hyper-rect tiling method.
        LLVM_DEBUG(llvm::dbgs() << "Checking whether tiling legality violated "
                                   "for dependence at depth: "
                                << Twine(d) << " between:\n";);
        LLVM_DEBUG(srcAccess.opInst->dump());
        LLVM_DEBUG(dstAccess.opInst->dump());
        for (const DependenceComponent &depComp : depComps) {
          // A component whose [lb, ub] interval lies entirely below zero
          // indicates a negative dependence direction.
          if (depComp.lb.has_value() && depComp.ub.has_value() &&
              *depComp.lb < *depComp.ub && *depComp.ub < 0) {
            LLVM_DEBUG(llvm::dbgs()
                       << "Dependence component lb = " << Twine(*depComp.lb)
                       << " ub = " << Twine(*depComp.ub)
                       << " is negative  at depth: " << Twine(d)
                       << " and thus violates the legality rule.\n");
            return false;
          }
        }
      }
    }
  }

  return true;
}
573
574bool mlir::affine::hasCyclicDependence(AffineForOp root) {
575 // Collect all the memory accesses in the source nest grouped by their
576 // immediate parent block.
577 DirectedOpGraph graph;
578 SmallVector<MemRefAccess> accesses;
579 root->walk(callback: [&](Operation *op) {
580 if (isa<AffineReadOpInterface, AffineWriteOpInterface>(Val: op)) {
581 accesses.emplace_back(Args&: op);
582 graph.addNode(op);
583 }
584 });
585
586 // Construct the dependence graph for all the collected acccesses.
587 unsigned rootDepth = getNestingDepth(op: root);
588 for (const auto &accA : accesses) {
589 for (const auto &accB : accesses) {
590 if (accA.memref != accB.memref)
591 continue;
592 // Perform the dependence on all surrounding loops + the body.
593 unsigned numCommonLoops =
594 getNumCommonSurroundingLoops(a&: *accA.opInst, b&: *accB.opInst);
595 for (unsigned d = rootDepth + 1; d <= numCommonLoops + 1; ++d) {
596 if (!noDependence(result: checkMemrefAccessDependence(srcAccess: accA, dstAccess: accB, loopDepth: d)))
597 graph.addEdge(src: accA.opInst, dest: accB.opInst);
598 }
599 }
600 }
601 return graph.hasCycle();
602}
603

// Source: mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp