1//===- ScopInfo.cpp -------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Create a polyhedral description for a static control flow region.
10//
11// The pass creates a polyhedral description of the Scops detected by the Scop
12// detection derived from their LLVM-IR code.
13//
14// This representation is shared among several tools in the polyhedral
15// community, which are e.g. Cloog, Pluto, Loopo, Graphite.
16//
17//===----------------------------------------------------------------------===//
18
19#include "polly/ScopInfo.h"
20#include "polly/LinkAllPasses.h"
21#include "polly/Options.h"
22#include "polly/ScopBuilder.h"
23#include "polly/ScopDetection.h"
24#include "polly/Support/GICHelper.h"
25#include "polly/Support/ISLOStream.h"
26#include "polly/Support/ISLTools.h"
27#include "polly/Support/SCEVAffinator.h"
28#include "polly/Support/SCEVValidator.h"
29#include "polly/Support/ScopHelper.h"
30#include "llvm/ADT/APInt.h"
31#include "llvm/ADT/ArrayRef.h"
32#include "llvm/ADT/PostOrderIterator.h"
33#include "llvm/ADT/Sequence.h"
34#include "llvm/ADT/SmallPtrSet.h"
35#include "llvm/ADT/SmallSet.h"
36#include "llvm/ADT/Statistic.h"
37#include "llvm/ADT/StringExtras.h"
38#include "llvm/Analysis/AliasAnalysis.h"
39#include "llvm/Analysis/AssumptionCache.h"
40#include "llvm/Analysis/Loads.h"
41#include "llvm/Analysis/LoopInfo.h"
42#include "llvm/Analysis/OptimizationRemarkEmitter.h"
43#include "llvm/Analysis/RegionInfo.h"
44#include "llvm/Analysis/RegionIterator.h"
45#include "llvm/Analysis/ScalarEvolution.h"
46#include "llvm/Analysis/ScalarEvolutionExpressions.h"
47#include "llvm/IR/BasicBlock.h"
48#include "llvm/IR/ConstantRange.h"
49#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/DebugLoc.h"
51#include "llvm/IR/Dominators.h"
52#include "llvm/IR/Function.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
55#include "llvm/IR/Instructions.h"
56#include "llvm/IR/Module.h"
57#include "llvm/IR/PassManager.h"
58#include "llvm/IR/Type.h"
59#include "llvm/IR/Value.h"
60#include "llvm/InitializePasses.h"
61#include "llvm/Support/Compiler.h"
62#include "llvm/Support/Debug.h"
63#include "llvm/Support/ErrorHandling.h"
64#include "llvm/Support/raw_ostream.h"
65#include "isl/aff.h"
66#include "isl/local_space.h"
67#include "isl/map.h"
68#include "isl/options.h"
69#include "isl/set.h"
70#include <cassert>
71#include <numeric>
72
73using namespace llvm;
74using namespace polly;
75
76#include "polly/Support/PollyDebug.h"
77#define DEBUG_TYPE "polly-scops"
78
// Statistics (printed with -stats) about the assumptions Polly takes while
// building SCoPs, and about the shape of the SCoPs that remain feasible
// after ScopInfo ran.
STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
STATISTIC(AssumptionsInbounds, "Number of inbounds assumptions taken.");
STATISTIC(AssumptionsWrapping, "Number of wrapping assumptions taken.");
STATISTIC(AssumptionsUnsigned, "Number of unsigned assumptions taken.");
STATISTIC(AssumptionsComplexity, "Number of too complex SCoPs.");
STATISTIC(AssumptionsUnprofitable, "Number of unprofitable SCoPs.");
STATISTIC(AssumptionsErrorBlock, "Number of error block assumptions taken.");
STATISTIC(AssumptionsInfiniteLoop, "Number of bounded loop assumptions taken.");
STATISTIC(AssumptionsInvariantLoad,
          "Number of invariant loads assumptions taken.");
STATISTIC(AssumptionsDelinearization,
          "Number of delinearization assumptions taken.");

STATISTIC(NumScops, "Number of feasible SCoPs after ScopInfo");
STATISTIC(NumLoopsInScop, "Number of loops in scops");
STATISTIC(NumBoxedLoops, "Number of boxed loops in SCoPs after ScopInfo");
STATISTIC(NumAffineLoops, "Number of affine loops in SCoPs after ScopInfo");

// Histogram of maximal loop depths of the accepted SCoPs.
STATISTIC(NumScopsDepthZero, "Number of scops with maximal loop depth 0");
STATISTIC(NumScopsDepthOne, "Number of scops with maximal loop depth 1");
STATISTIC(NumScopsDepthTwo, "Number of scops with maximal loop depth 2");
STATISTIC(NumScopsDepthThree, "Number of scops with maximal loop depth 3");
STATISTIC(NumScopsDepthFour, "Number of scops with maximal loop depth 4");
STATISTIC(NumScopsDepthFive, "Number of scops with maximal loop depth 5");
STATISTIC(NumScopsDepthLarger,
          "Number of scops with maximal loop depth 6 and larger");
STATISTIC(MaxNumLoopsInScop, "Maximal number of loops in scops");

STATISTIC(NumValueWrites, "Number of scalar value writes after ScopInfo");
STATISTIC(
    NumValueWritesInLoops,
    "Number of scalar value writes nested in affine loops after ScopInfo");
STATISTIC(NumPHIWrites, "Number of scalar phi writes after ScopInfo");
STATISTIC(NumPHIWritesInLoops,
          "Number of scalar phi writes nested in affine loops after ScopInfo");
STATISTIC(NumSingletonWrites, "Number of singleton writes after ScopInfo");
STATISTIC(NumSingletonWritesInLoops,
          "Number of singleton writes nested in affine loops after ScopInfo");
117
unsigned const polly::MaxDisjunctsInDomain = 20;

// The number of disjuncts in the context after which we stop adding more
// disjuncts. This parameter is there to avoid exponential growth in the
// number of disjuncts when adding non-convex sets to the context.
static int const MaxDisjunctsInContext = 4;

// Be a bit more generous for the defined behavior context which is used less
// often.
// NOTE(review): identifier spelling ("Disjunkts", "Behaviour") kept as-is;
// renaming would require updating all uses outside this excerpt.
static int const MaxDisjunktsInDefinedBehaviourContext = 8;

// Command-line options controlling how precisely (and how expensively) the
// polyhedral description is built.
static cl::opt<bool> PollyRemarksMinimal(
    "polly-remarks-minimal",
    cl::desc("Do not emit remarks about assumptions that are known"),
    cl::Hidden, cl::cat(PollyCategory));

static cl::opt<bool>
    IslOnErrorAbort("polly-on-isl-error-abort",
                    cl::desc("Abort if an isl error is encountered"),
                    cl::init(Val: true), cl::cat(PollyCategory));

static cl::opt<bool> PollyPreciseInbounds(
    "polly-precise-inbounds",
    cl::desc("Take more precise inbounds assumptions (do not scale well)"),
    cl::Hidden, cl::init(Val: false), cl::cat(PollyCategory));

static cl::opt<bool> PollyIgnoreParamBounds(
    "polly-ignore-parameter-bounds",
    cl::desc(
        "Do not add parameter bounds and do no gist simplify sets accordingly"),
    cl::Hidden, cl::init(Val: false), cl::cat(PollyCategory));

static cl::opt<bool> PollyPreciseFoldAccesses(
    "polly-precise-fold-accesses",
    cl::desc("Fold memory accesses to model more possible delinearizations "
             "(does not scale well)"),
    cl::Hidden, cl::init(Val: false), cl::cat(PollyCategory));

bool polly::UseInstructionNames;

// Writes through to polly::UseInstructionNames via cl::location.
static cl::opt<bool, true> XUseInstructionNames(
    "polly-use-llvm-names",
    cl::desc("Use LLVM-IR names when deriving statement names"),
    cl::location(L&: UseInstructionNames), cl::Hidden, cl::cat(PollyCategory));

static cl::opt<bool> PollyPrintInstructions(
    "polly-print-instructions", cl::desc("Output instructions per ScopStmt"),
    cl::Hidden, cl::Optional, cl::init(Val: false), cl::cat(PollyCategory));

// Raw option strings forwarded to the isl library (may be given repeatedly).
static cl::list<std::string> IslArgs("polly-isl-arg",
                                     cl::value_desc("argument"),
                                     cl::desc("Option passed to ISL"),
                                     cl::cat(PollyCategory));
171
172//===----------------------------------------------------------------------===//
173
/// Add lower and upper bounds to dimension @p dim of @p S derived from the
/// constant range @p Range.
///
/// The bounds are the signed minimum/maximum of @p Range. For a sign-wrapped
/// range the set is additionally refined to exclude the uncovered middle
/// part, unless the set already has more than MaxDisjunctsInContext basic
/// sets (to avoid blowing up the context).
static isl::set addRangeBoundsToSet(isl::set S, const ConstantRange &Range,
                                    int dim, isl::dim type) {
  isl::val V;
  isl::ctx Ctx = S.ctx();

  // The upper and lower bound for a parameter value is derived either from
  // the data type of the parameter or from the - possibly more restrictive -
  // range metadata.
  V = valFromAPInt(Ctx: Ctx.get(), Int: Range.getSignedMin(), IsSigned: true);
  S = S.lower_bound_val(type, pos: dim, value: V);
  V = valFromAPInt(Ctx: Ctx.get(), Int: Range.getSignedMax(), IsSigned: true);
  S = S.upper_bound_val(type, pos: dim, value: V);

  // A full range carries no information beyond the type bounds added above.
  if (Range.isFullSet())
    return S;

  // Do not refine further if the set is already complex.
  if (S.n_basic_set().release() > MaxDisjunctsInContext)
    return S;

  // In case of signed wrapping, we can refine the set of valid values by
  // excluding the part not covered by the wrapping range.
  if (Range.isSignWrappedSet()) {
    V = valFromAPInt(Ctx: Ctx.get(), Int: Range.getLower(), IsSigned: true);
    isl::set SLB = S.lower_bound_val(type, pos: dim, value: V);

    // Upper bound is exclusive, hence subtract one.
    V = valFromAPInt(Ctx: Ctx.get(), Int: Range.getUpper(), IsSigned: true);
    V = V.sub(v2: 1);
    isl::set SUB = S.upper_bound_val(type, pos: dim, value: V);
    S = SLB.unite(set2: SUB);
  }

  return S;
}
207
208static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
209 LoadInst *BasePtrLI = dyn_cast<LoadInst>(Val: BasePtr);
210 if (!BasePtrLI)
211 return nullptr;
212
213 if (!S->contains(I: BasePtrLI))
214 return nullptr;
215
216 ScalarEvolution &SE = *S->getSE();
217
218 const SCEV *OriginBaseSCEV =
219 SE.getPointerBase(V: SE.getSCEV(V: BasePtrLI->getPointerOperand()));
220 if (!OriginBaseSCEV)
221 return nullptr;
222
223 auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(Val: OriginBaseSCEV);
224 if (!OriginBaseSCEVUnknown)
225 return nullptr;
226
227 return S->getScopArrayInfo(BasePtr: OriginBaseSCEVUnknown->getValue(),
228 Kind: MemoryKind::Array);
229}
230
/// Construct a ScopArrayInfo for base pointer @p BasePtr.
///
/// Allocates an isl id carrying this object as its user pointer (so
/// getFromId() can recover the SAI later), records the dimension sizes, and,
/// for plain arrays, resolves a possible indirect base-pointer origin.
ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl::ctx Ctx,
                             ArrayRef<const SCEV *> Sizes, MemoryKind Kind,
                             const DataLayout &DL, Scop *S,
                             const char *BaseName)
    : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
  // Use the caller-provided name, or derive an isl-compatible one from the
  // base pointer; PHI memory objects get a "__phi" suffix.
  std::string BasePtrName =
      BaseName ? BaseName
               : getIslCompatibleName(Prefix: "MemRef", Val: BasePtr, Number: S->getNextArrayIdx(),
                                      Suffix: Kind == MemoryKind::PHI ? "__phi" : "",
                                      UseInstructionNames);
  Id = isl::id::alloc(ctx: Ctx, name: BasePtrName, user: this);

  updateSizes(Sizes);

  // Only genuine arrays with a known base pointer can have an origin SAI.
  if (!BasePtr || Kind != MemoryKind::Array) {
    BasePtrOriginSAI = nullptr;
    return;
  }

  // If the base pointer is itself loaded within the SCoP, remember the array
  // it is loaded from and register this array as derived from it.
  BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
  if (BasePtrOriginSAI)
    const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(DerivedSAI: this);
}
254
// Defaulted destructor; all members clean up via their own destructors.
ScopArrayInfo::~ScopArrayInfo() = default;
256
257isl::space ScopArrayInfo::getSpace() const {
258 auto Space = isl::space(Id.ctx(), 0, getNumberOfDimensions());
259 Space = Space.set_tuple_id(type: isl::dim::set, id: Id);
260 return Space;
261}
262
263bool ScopArrayInfo::isReadOnly() {
264 isl::union_set WriteSet = S.getWrites().range();
265 isl::space Space = getSpace();
266 WriteSet = WriteSet.extract_set(space: Space);
267
268 return bool(WriteSet.is_empty());
269}
270
271bool ScopArrayInfo::isCompatibleWith(const ScopArrayInfo *Array) const {
272 if (Array->getElementType() != getElementType())
273 return false;
274
275 if (Array->getNumberOfDimensions() != getNumberOfDimensions())
276 return false;
277
278 for (unsigned i = 0; i < getNumberOfDimensions(); i++)
279 if (Array->getDimensionSize(Dim: i) != getDimensionSize(Dim: i))
280 return false;
281
282 return true;
283}
284
285void ScopArrayInfo::updateElementType(Type *NewElementType) {
286 if (NewElementType == ElementType)
287 return;
288
289 auto OldElementSize = DL.getTypeAllocSizeInBits(Ty: ElementType);
290 auto NewElementSize = DL.getTypeAllocSizeInBits(Ty: NewElementType);
291
292 if (NewElementSize == OldElementSize || NewElementSize == 0)
293 return;
294
295 if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
296 ElementType = NewElementType;
297 } else {
298 auto GCD = std::gcd(m: (uint64_t)NewElementSize, n: (uint64_t)OldElementSize);
299 ElementType = IntegerType::get(C&: ElementType->getContext(), NumBits: GCD);
300 }
301}
302
/// Update the recorded per-dimension sizes with @p NewSizes.
///
/// Sizes are aligned at the innermost dimension. With @p CheckConsistency
/// set, the known sizes of shared dimensions must match (returns false on a
/// mismatch) and the update is skipped - returning true - when no new outer
/// dimensions would be added.
bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
                                bool CheckConsistency) {
  int SharedDims = std::min(a: NewSizes.size(), b: DimensionSizes.size());
  // Offsets of the first shared (innermost-aligned) dimension in each list.
  int ExtraDimsNew = NewSizes.size() - SharedDims;
  int ExtraDimsOld = DimensionSizes.size() - SharedDims;

  if (CheckConsistency) {
    for (int i = 0; i < SharedDims; i++) {
      auto *NewSize = NewSizes[i + ExtraDimsNew];
      auto *KnownSize = DimensionSizes[i + ExtraDimsOld];
      // Both sizes known but different: inconsistent update.
      if (NewSize && KnownSize && NewSize != KnownSize)
        return false;
    }

    // Nothing to do when the new sizes add no outer dimensions.
    if (DimensionSizes.size() >= NewSizes.size())
      return true;
  }

  // Adopt the new sizes and recompute their piecewise-affine counterparts.
  DimensionSizes.clear();
  DimensionSizes.insert(I: DimensionSizes.begin(), From: NewSizes.begin(),
                        To: NewSizes.end());
  DimensionSizesPw.clear();
  for (const SCEV *Expr : DimensionSizes) {
    if (!Expr) {
      // Unknown size (e.g. the unbounded outermost dimension).
      DimensionSizesPw.push_back(Elt: isl::pw_aff());
      continue;
    }
    isl::pw_aff Size = S.getPwAffOnly(E: Expr);
    DimensionSizesPw.push_back(Elt: Size);
  }
  return true;
}
335
336std::string ScopArrayInfo::getName() const { return Id.get_name(); }
337
338int ScopArrayInfo::getElemSizeInBytes() const {
339 return DL.getTypeAllocSize(Ty: ElementType);
340}
341
342isl::id ScopArrayInfo::getBasePtrId() const { return Id; }
343
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger aid: print this array description to stderr.
LLVM_DUMP_METHOD void ScopArrayInfo::dump() const { print(OS&: errs()); }
#endif
347
348void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
349 OS.indent(NumSpaces: 8) << *getElementType() << " " << getName();
350 unsigned u = 0;
351
352 if (getNumberOfDimensions() > 0 && !getDimensionSize(Dim: 0)) {
353 OS << "[*]";
354 u++;
355 }
356 for (; u < getNumberOfDimensions(); u++) {
357 OS << "[";
358
359 if (SizeAsPwAff) {
360 isl::pw_aff Size = getDimensionSizePw(Dim: u);
361 OS << " " << Size << " ";
362 } else {
363 OS << *getDimensionSize(Dim: u);
364 }
365
366 OS << "]";
367 }
368
369 OS << ";";
370
371 if (BasePtrOriginSAI)
372 OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";
373
374 OS << " // Element size " << getElemSizeInBytes() << "\n";
375}
376
377const ScopArrayInfo *
378ScopArrayInfo::getFromAccessFunction(isl::pw_multi_aff PMA) {
379 isl::id Id = PMA.get_tuple_id(type: isl::dim::out);
380 assert(!Id.is_null() && "Output dimension didn't have an ID");
381 return getFromId(Id);
382}
383
384const ScopArrayInfo *ScopArrayInfo::getFromId(isl::id Id) {
385 void *User = Id.get_user();
386 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
387 return SAI;
388}
389
/// Normalize subscripts of constant-sized dimensions into [0, size).
///
/// Working from the innermost dimension outwards, each subscript is replaced
/// by (index mod size) and the overflow floor(index / size) is carried into
/// the next outer dimension. Dimensions with dynamic or zero size are left
/// untouched. Finally the combined transformation is applied to the access
/// relation.
void MemoryAccess::wrapConstantDimensions() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::ctx Ctx = ArraySpace.ctx();
  unsigned DimsArray = SAI->getNumberOfDimensions();

  // Start from the identity map on the array space; mod/div expressions are
  // folded into it dimension by dimension.
  isl::multi_aff DivModAff = isl::multi_aff::identity(
      space: ArraySpace.map_from_domain_and_range(range: ArraySpace));
  isl::local_space LArraySpace = isl::local_space(ArraySpace);

  // Begin with last dimension, to iteratively carry into higher dimensions.
  for (int i = DimsArray - 1; i > 0; i--) {
    auto *DimSize = SAI->getDimensionSize(Dim: i);
    auto *DimSizeCst = dyn_cast<SCEVConstant>(Val: DimSize);

    // This transformation is not applicable to dimensions with dynamic size.
    if (!DimSizeCst)
      continue;

    // This transformation is not applicable to dimensions of size zero.
    if (DimSize->isZero())
      continue;

    isl::val DimSizeVal =
        valFromAPInt(Ctx: Ctx.get(), Int: DimSizeCst->getAPInt(), IsSigned: false);
    isl::aff Var = isl::aff::var_on_domain(ls: LArraySpace, type: isl::dim::set, pos: i);
    isl::aff PrevVar =
        isl::aff::var_on_domain(ls: LArraySpace, type: isl::dim::set, pos: i - 1);

    // Compute: index % size
    // Modulo must apply in the divide of the previous iteration, if any.
    isl::aff Modulo = Var.mod(mod: DimSizeVal);
    Modulo = Modulo.pullback(ma: DivModAff);

    // Compute: floor(index / size)
    isl::aff Divide = Var.div(aff2: isl::aff(LArraySpace, DimSizeVal));
    Divide = Divide.floor();
    Divide = Divide.add(aff2: PrevVar);
    Divide = Divide.pullback(ma: DivModAff);

    // Apply Modulo and Divide.
    DivModAff = DivModAff.set_aff(pos: i, el: Modulo);
    DivModAff = DivModAff.set_aff(pos: i - 1, el: Divide);
  }

  // Apply all modulo/divides on the accesses.
  isl::map Relation = AccessRelation;
  Relation = Relation.apply_range(map2: isl::map::from_multi_aff(maff: DivModAff));
  Relation = Relation.detect_equalities();
  AccessRelation = Relation;
}
441
/// Adjust the access relation to the dimensionality of the underlying array.
///
/// Missing outer dimensions are prepended and fixed to zero; single-subscript
/// (non-delinearized) accesses have their byte offset divided by the element
/// size; and accesses whose loaded type is larger than the array's canonical
/// element type are widened into multi-element accesses.
void MemoryAccess::updateDimensionality() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::space AccessSpace = AccessRelation.get_space().range();
  isl::ctx Ctx = ArraySpace.ctx();

  // The access may use fewer subscripts than the array has dimensions.
  unsigned DimsArray = unsignedFromIslSize(Size: ArraySpace.dim(type: isl::dim::set));
  unsigned DimsAccess = unsignedFromIslSize(Size: AccessSpace.dim(type: isl::dim::set));
  assert(DimsArray >= DimsAccess);
  unsigned DimsMissing = DimsArray - DimsAccess;

  auto *BB = getStatement()->getEntryBlock();
  auto &DL = BB->getModule()->getDataLayout();
  unsigned ArrayElemSize = SAI->getElemSizeInBytes();
  unsigned ElemBytes = DL.getTypeAllocSize(Ty: getElementType());

  // Map the access space into the array space: pad missing outer dimensions
  // with zero and equate the remaining ones.
  isl::map Map = isl::map::from_domain_and_range(
      domain: isl::set::universe(space: AccessSpace), range: isl::set::universe(space: ArraySpace));

  for (auto i : seq<unsigned>(Begin: 0, End: DimsMissing))
    Map = Map.fix_si(type: isl::dim::out, pos: i, value: 0);

  for (auto i : seq<unsigned>(Begin: DimsMissing, End: DimsArray))
    Map = Map.equate(type1: isl::dim::in, pos1: i - DimsMissing, type2: isl::dim::out, pos2: i);

  AccessRelation = AccessRelation.apply_range(map2: Map);

  // For the non delinearized arrays, divide the access function of the last
  // subscript by the size of the elements in the array.
  //
  // A stride one array access in C expressed as A[i] is expressed in
  // LLVM-IR as something like A[i * elementsize]. This hides the fact that
  // two subsequent values of 'i' index two values that are stored next to
  // each other in memory. By this division we make this characteristic
  // obvious again. If the base pointer was accessed with offsets not divisible
  // by the accesses element size, we will have chosen a smaller ArrayElemSize
  // that divides the offsets of all accesses to this base pointer.
  if (DimsAccess == 1) {
    isl::val V = isl::val(Ctx, ArrayElemSize);
    AccessRelation = AccessRelation.floordiv_val(d: V);
  }

  // We currently do this only if we added at least one dimension, which means
  // some dimension's indices have not been specified, an indicator that some
  // index values have been added together.
  // TODO: Investigate general usefulness; Effect on unit tests is to make index
  // expressions more complicated.
  if (DimsMissing)
    wrapConstantDimensions();

  if (!isAffine())
    computeBoundsOnAccessRelation(ElementSize: ArrayElemSize);

  // Introduce multi-element accesses in case the type loaded by this memory
  // access is larger than the canonical element type of the array.
  //
  // An access ((float *)A)[i] to an array char *A is modeled as
  // {[i] -> A[o] : 4 i <= o <= 4 i + 3
  if (ElemBytes > ArrayElemSize) {
    assert(ElemBytes % ArrayElemSize == 0 &&
           "Loaded element size should be multiple of canonical element size");
    assert(DimsArray >= 1);
    isl::map Map = isl::map::from_domain_and_range(
        domain: isl::set::universe(space: ArraySpace), range: isl::set::universe(space: ArraySpace));
    for (auto i : seq<unsigned>(Begin: 0, End: DimsArray - 1))
      Map = Map.equate(type1: isl::dim::in, pos1: i, type2: isl::dim::out, pos2: i);

    isl::constraint C;
    isl::local_space LS;

    LS = isl::local_space(Map.get_space());
    // Number of canonical elements covered by one loaded element.
    int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();

    // out <= in + (Num - 1) in the innermost dimension.
    C = isl::constraint::alloc_inequality(ls: LS);
    C = C.set_constant_val(isl::val(Ctx, Num - 1));
    C = C.set_coefficient_si(type: isl::dim::in, pos: DimsArray - 1, v: 1);
    C = C.set_coefficient_si(type: isl::dim::out, pos: DimsArray - 1, v: -1);
    Map = Map.add_constraint(constraint: C);

    // in <= out in the innermost dimension.
    C = isl::constraint::alloc_inequality(ls: LS);
    C = C.set_coefficient_si(type: isl::dim::in, pos: DimsArray - 1, v: -1);
    C = C.set_coefficient_si(type: isl::dim::out, pos: DimsArray - 1, v: 1);
    C = C.set_constant_val(isl::val(Ctx, 0));
    Map = Map.add_constraint(constraint: C);
    AccessRelation = AccessRelation.apply_range(map2: Map);
  }
}
529
530std::string
531MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
532 switch (RT) {
533 case MemoryAccess::RT_NONE:
534 llvm_unreachable("Requested a reduction operator string for a memory "
535 "access which isn't a reduction");
536 case MemoryAccess::RT_BOTTOM:
537 llvm_unreachable("Requested a reduction operator string for a internal "
538 "reduction type!");
539 case MemoryAccess::RT_ADD:
540 return "+";
541 case MemoryAccess::RT_MUL:
542 return "*";
543 case MemoryAccess::RT_BOR:
544 return "|";
545 case MemoryAccess::RT_BXOR:
546 return "^";
547 case MemoryAccess::RT_BAND:
548 return "&";
549 }
550 llvm_unreachable("Unknown reduction type");
551}
552
553const ScopArrayInfo *MemoryAccess::getOriginalScopArrayInfo() const {
554 isl::id ArrayId = getArrayId();
555 void *User = ArrayId.get_user();
556 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
557 return SAI;
558}
559
560const ScopArrayInfo *MemoryAccess::getLatestScopArrayInfo() const {
561 isl::id ArrayId = getLatestArrayId();
562 void *User = ArrayId.get_user();
563 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
564 return SAI;
565}
566
567isl::id MemoryAccess::getOriginalArrayId() const {
568 return AccessRelation.get_tuple_id(type: isl::dim::out);
569}
570
571isl::id MemoryAccess::getLatestArrayId() const {
572 if (!hasNewAccessRelation())
573 return getOriginalArrayId();
574 return NewAccessRelation.get_tuple_id(type: isl::dim::out);
575}
576
577isl::map MemoryAccess::getAddressFunction() const {
578 return getAccessRelation().lexmin();
579}
580
581isl::pw_multi_aff
582MemoryAccess::applyScheduleToAccessRelation(isl::union_map USchedule) const {
583 isl::map Schedule, ScheduledAccRel;
584 isl::union_set UDomain;
585
586 UDomain = getStatement()->getDomain();
587 USchedule = USchedule.intersect_domain(uset: UDomain);
588 Schedule = isl::map::from_union_map(umap: USchedule);
589 ScheduledAccRel = getAddressFunction().apply_domain(map2: Schedule);
590 return isl::pw_multi_aff::from_map(map: ScheduledAccRel);
591}
592
593isl::map MemoryAccess::getOriginalAccessRelation() const {
594 return AccessRelation;
595}
596
597std::string MemoryAccess::getOriginalAccessRelationStr() const {
598 return stringFromIslObj(Obj: AccessRelation);
599}
600
601isl::space MemoryAccess::getOriginalAccessRelationSpace() const {
602 return AccessRelation.get_space();
603}
604
605isl::map MemoryAccess::getNewAccessRelation() const {
606 return NewAccessRelation;
607}
608
609std::string MemoryAccess::getNewAccessRelationStr() const {
610 return stringFromIslObj(Obj: NewAccessRelation);
611}
612
613std::string MemoryAccess::getAccessRelationStr() const {
614 return stringFromIslObj(Obj: getAccessRelation());
615}
616
617isl::basic_map MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
618 isl::space Space = isl::space(Statement->getIslCtx(), 0, 1);
619 Space = Space.align_params(space2: Statement->getDomainSpace());
620
621 return isl::basic_map::from_domain_and_range(
622 domain: isl::basic_set::universe(space: Statement->getDomainSpace()),
623 range: isl::basic_set::universe(space: Space));
624}
625
// Formalize no out-of-bound access assumption
//
// When delinearizing array accesses we optimistically assume that the
// delinearized accesses do not access out of bound locations (the subscript
// expression of each array evaluates for each statement instance that is
// executed to a value that is larger than zero and strictly smaller than the
// size of the corresponding dimension). The only exception is the outermost
// dimension for which we do not need to assume any upper bound. At this point
// we formalize this assumption to ensure that at code generation time the
// relevant run-time checks can be generated.
//
// To find the set of constraints necessary to avoid out of bound accesses, we
// first build the set of data locations that are not within array bounds. We
// then apply the reverse access relation to obtain the set of iterations that
// may contain invalid accesses and reduce this set of iterations to the ones
// that are actually executed by intersecting them with the domain of the
// statement. If we now project out all loop dimensions, we obtain a set of
// parameters that may cause statement instances to be executed that may
// possibly yield out of bound memory accesses. The complement of these
// constraints is the set of constraints that needs to be assumed to ensure such
// statement instances are never executed.
isl::set MemoryAccess::assumeNoOutOfBound() {
  auto *SAI = getScopArrayInfo();
  isl::space Space = getOriginalAccessRelationSpace().range();
  isl::set Outside = isl::set::empty(space: Space);
  // Start at dimension 1: the outermost dimension needs no upper bound.
  for (int i = 1, Size = Space.dim(type: isl::dim::set).release(); i < Size; ++i) {
    isl::local_space LS(Space);
    isl::pw_aff Var = isl::pw_aff::var_on_domain(ls: LS, type: isl::dim::set, pos: i);
    isl::pw_aff Zero = isl::pw_aff(LS);

    // Out of bounds below (index < 0) ...
    isl::set DimOutside = Var.lt_set(pwaff2: Zero);
    // ... or above (index >= dimension size).
    isl::pw_aff SizeE = SAI->getDimensionSizePw(Dim: i);
    SizeE = SizeE.add_dims(type: isl::dim::in, n: Space.dim(type: isl::dim::set).release());
    SizeE = SizeE.set_tuple_id(type: isl::dim::in, id: Space.get_tuple_id(type: isl::dim::set));
    DimOutside = DimOutside.unite(set2: SizeE.le_set(pwaff2: Var));

    Outside = Outside.unite(set2: DimOutside);
  }

  // Map out-of-bound locations back to the iterations that reach them,
  // restrict to executed iterations, and project down to the parameters.
  Outside = Outside.apply(map: getAccessRelation().reverse());
  Outside = Outside.intersect(set2: Statement->getDomain());
  Outside = Outside.params();

  // Remove divs to avoid the construction of overly complicated assumptions.
  // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time check
  // bail out more often than strictly necessary.
  Outside = Outside.remove_divs();
  Outside = Outside.complement();

  if (!PollyPreciseInbounds)
    Outside = Outside.gist_params(context: Statement->getDomain().params());
  return Outside;
}
680
/// Build the access relation for a memory intrinsic (memset/memcpy/...).
///
/// Subscripts[0] holds the start offset and Subscripts[1] the optional
/// length; the modeled range is [start, start + length), or unbounded above
/// when the length is unknown (null).
void MemoryAccess::buildMemIntrinsicAccessRelation() {
  assert(isMemoryIntrinsic());
  assert(Subscripts.size() == 2 && Sizes.size() == 1);

  isl::pw_aff SubscriptPWA = getPwAff(E: Subscripts[0]);
  isl::map SubscriptMap = isl::map::from_pw_aff(pwaff: SubscriptPWA);

  isl::map LengthMap;
  if (Subscripts[1] == nullptr) {
    // Unknown length: any offset may be accessed.
    LengthMap = isl::map::universe(space: SubscriptMap.get_space());
  } else {
    isl::pw_aff LengthPWA = getPwAff(E: Subscripts[1]);
    LengthMap = isl::map::from_pw_aff(pwaff: LengthPWA);
    // Map the length to all strictly smaller offsets: {len -> o : o < len}.
    isl::space RangeSpace = LengthMap.get_space().range();
    LengthMap = LengthMap.apply_range(map2: isl::map::lex_gt(set_space: RangeSpace));
  }
  // Offsets are non-negative; shifting by the start subscript yields the
  // final range [start, start + length).
  LengthMap = LengthMap.lower_bound_si(type: isl::dim::out, pos: 0, value: 0);
  LengthMap = LengthMap.align_params(model: SubscriptMap.get_space());
  SubscriptMap = SubscriptMap.align_params(model: LengthMap.get_space());
  LengthMap = LengthMap.sum(map2: SubscriptMap);
  AccessRelation =
      LengthMap.set_tuple_id(type: isl::dim::in, id: getStatement()->getDomainId());
}
704
705void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
706 ScalarEvolution *SE = Statement->getParent()->getSE();
707
708 auto MAI = MemAccInst(getAccessInstruction());
709 if (isa<MemIntrinsic>(Val: MAI))
710 return;
711
712 Value *Ptr = MAI.getPointerOperand();
713 if (!Ptr || !SE->isSCEVable(Ty: Ptr->getType()))
714 return;
715
716 const SCEV *PtrSCEV = SE->getSCEV(V: Ptr);
717 if (isa<SCEVCouldNotCompute>(Val: PtrSCEV))
718 return;
719
720 const SCEV *BasePtrSCEV = SE->getPointerBase(V: PtrSCEV);
721 if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(Val: BasePtrSCEV))
722 PtrSCEV = SE->getMinusSCEV(LHS: PtrSCEV, RHS: BasePtrSCEV);
723
724 const ConstantRange &Range = SE->getSignedRange(S: PtrSCEV);
725 if (Range.isFullSet())
726 return;
727
728 if (Range.isUpperWrapped() || Range.isSignWrappedSet())
729 return;
730
731 bool isWrapping = Range.isSignWrappedSet();
732
733 unsigned BW = Range.getBitWidth();
734 const auto One = APInt(BW, 1);
735 const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
736 const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();
737
738 auto Min = LB.sdiv(RHS: APInt(BW, ElementSize));
739 auto Max = UB.sdiv(RHS: APInt(BW, ElementSize)) + One;
740
741 assert(Min.sle(Max) && "Minimum expected to be less or equal than max");
742
743 isl::map Relation = AccessRelation;
744 isl::set AccessRange = Relation.range();
745 AccessRange = addRangeBoundsToSet(S: AccessRange, Range: ConstantRange(Min, Max), dim: 0,
746 type: isl::dim::set);
747 AccessRelation = Relation.intersect_range(set: AccessRange);
748}
749
750void MemoryAccess::foldAccessRelation() {
751 if (Sizes.size() < 2 || isa<SCEVConstant>(Val: Sizes[1]))
752 return;
753
754 int Size = Subscripts.size();
755
756 isl::map NewAccessRelation = AccessRelation;
757
758 for (int i = Size - 2; i >= 0; --i) {
759 isl::space Space;
760 isl::map MapOne, MapTwo;
761 isl::pw_aff DimSize = getPwAff(E: Sizes[i + 1]);
762
763 isl::space SpaceSize = DimSize.get_space();
764 isl::id ParamId = SpaceSize.get_dim_id(type: isl::dim::param, pos: 0);
765
766 Space = AccessRelation.get_space();
767 Space = Space.range().map_from_set();
768 Space = Space.align_params(space2: SpaceSize);
769
770 int ParamLocation = Space.find_dim_by_id(type: isl::dim::param, id: ParamId);
771
772 MapOne = isl::map::universe(space: Space);
773 for (int j = 0; j < Size; ++j)
774 MapOne = MapOne.equate(type1: isl::dim::in, pos1: j, type2: isl::dim::out, pos2: j);
775 MapOne = MapOne.lower_bound_si(type: isl::dim::in, pos: i + 1, value: 0);
776
777 MapTwo = isl::map::universe(space: Space);
778 for (int j = 0; j < Size; ++j)
779 if (j < i || j > i + 1)
780 MapTwo = MapTwo.equate(type1: isl::dim::in, pos1: j, type2: isl::dim::out, pos2: j);
781
782 isl::local_space LS(Space);
783 isl::constraint C;
784 C = isl::constraint::alloc_equality(ls: LS);
785 C = C.set_constant_si(-1);
786 C = C.set_coefficient_si(type: isl::dim::in, pos: i, v: 1);
787 C = C.set_coefficient_si(type: isl::dim::out, pos: i, v: -1);
788 MapTwo = MapTwo.add_constraint(constraint: C);
789 C = isl::constraint::alloc_equality(ls: LS);
790 C = C.set_coefficient_si(type: isl::dim::in, pos: i + 1, v: 1);
791 C = C.set_coefficient_si(type: isl::dim::out, pos: i + 1, v: -1);
792 C = C.set_coefficient_si(type: isl::dim::param, pos: ParamLocation, v: 1);
793 MapTwo = MapTwo.add_constraint(constraint: C);
794 MapTwo = MapTwo.upper_bound_si(type: isl::dim::in, pos: i + 1, value: -1);
795
796 MapOne = MapOne.unite(map2: MapTwo);
797 NewAccessRelation = NewAccessRelation.apply_range(map2: MapOne);
798 }
799
800 isl::id BaseAddrId = getScopArrayInfo()->getBasePtrId();
801 isl::space Space = Statement->getDomainSpace();
802 NewAccessRelation = NewAccessRelation.set_tuple_id(
803 type: isl::dim::in, id: Space.get_tuple_id(type: isl::dim::set));
804 NewAccessRelation = NewAccessRelation.set_tuple_id(type: isl::dim::out, id: BaseAddrId);
805 NewAccessRelation = NewAccessRelation.gist_domain(context: Statement->getDomain());
806
807 // Access dimension folding might in certain cases increase the number of
808 // disjuncts in the memory access, which can possibly complicate the generated
809 // run-time checks and can lead to costly compilation.
810 if (!PollyPreciseFoldAccesses && NewAccessRelation.n_basic_map().release() >
811 AccessRelation.n_basic_map().release()) {
812 } else {
813 AccessRelation = NewAccessRelation;
814 }
815}
816
/// Build the polyhedral access relation for this access with respect to the
/// array described by \p SAI. Affine accesses are modeled exactly from their
/// subscript expressions; memory intrinsics and non-affine accesses are
/// overapproximated.
void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
  assert(AccessRelation.is_null() && "AccessRelation already built");

  // Initialize the invalid domain which describes all iterations for which the
  // access relation is not modeled correctly.
  isl::set StmtInvalidDomain = getStatement()->getInvalidDomain();
  InvalidDomain = isl::set::empty(space: StmtInvalidDomain.get_space());

  isl::ctx Ctx = Id.ctx();
  isl::id BaseAddrId = SAI->getBasePtrId();

  // memset/memcpy/memmove get a dedicated byte-wise access relation.
  if (getAccessInstruction() && isa<MemIntrinsic>(Val: getAccessInstruction())) {
    buildMemIntrinsicAccessRelation();
    AccessRelation = AccessRelation.set_tuple_id(type: isl::dim::out, id: BaseAddrId);
    return;
  }

  if (!isAffine()) {
    // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference, if an
    // access must or may happen. However, for write accesses it is important to
    // differentiate between writes that must happen and writes that may happen.
    if (AccessRelation.is_null())
      AccessRelation = createBasicAccessMap(Statement);

    AccessRelation = AccessRelation.set_tuple_id(type: isl::dim::out, id: BaseAddrId);
    return;
  }

  // Affine case: add one output dimension per subscript expression.
  isl::space Space = isl::space(Ctx, 0, Statement->getNumIterators(), 0);
  AccessRelation = isl::map::universe(space: Space);

  for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
    isl::pw_aff Affine = getPwAff(E: Subscripts[i]);
    isl::map SubscriptMap = isl::map::from_pw_aff(pwaff: Affine);
    AccessRelation = AccessRelation.flat_range_product(map2: SubscriptMap);
  }

  // Name the input tuple after the statement and the output tuple after the
  // accessed array, then simplify w.r.t. the statement domain.
  Space = Statement->getDomainSpace();
  AccessRelation = AccessRelation.set_tuple_id(
      type: isl::dim::in, id: Space.get_tuple_id(type: isl::dim::set));
  AccessRelation = AccessRelation.set_tuple_id(type: isl::dim::out, id: BaseAddrId);

  AccessRelation = AccessRelation.gist_domain(context: Statement->getDomain());
}
862
/// Construct a MemoryAccess for an LLVM-IR instruction. The access relation
/// itself is built later via buildAccessRelation(); here only the metadata and
/// a unique isl id (derived from the statement name, access type and the
/// statement's current access count) are set up.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType AccType, Value *BaseAddress,
                           Type *ElementType, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           MemoryKind Kind)
    : Kind(Kind), AccType(AccType), Statement(Stmt), InvalidDomain(),
      BaseAddr(BaseAddress), ElementType(ElementType),
      Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
      AccessValue(AccessValue), IsAffine(Affine),
      Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(),
      NewAccessRelation() {
  // Indexed by AccessType (READ/MUST_WRITE/MAY_WRITE); index 0 is unused.
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(X: Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(ctx: Stmt->getParent()->getIslCtx(), name: IdName, user: this);
}

/// Construct a MemoryAccess directly from an explicit access relation (used
/// for copy statements). Array metadata is recovered from the ScopArrayInfo
/// attached to the relation's output tuple id.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, AccessType AccType, isl::map AccRel)
    : Kind(MemoryKind::Array), AccType(AccType), Statement(Stmt),
      InvalidDomain(), AccessRelation(), NewAccessRelation(AccRel) {
  isl::id ArrayInfoId = NewAccessRelation.get_tuple_id(type: isl::dim::out);
  auto *SAI = ScopArrayInfo::getFromId(Id: ArrayInfoId);
  // The outermost dimension carries no size; see ScopArrayInfo.
  Sizes.push_back(Elt: nullptr);
  for (unsigned i = 1; i < SAI->getNumberOfDimensions(); i++)
    Sizes.push_back(Elt: SAI->getDimensionSize(Dim: i));
  ElementType = SAI->getElementType();
  BaseAddr = SAI->getBasePtr();
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(X: Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(ctx: Stmt->getParent()->getIslCtx(), name: IdName, user: this);
}

MemoryAccess::~MemoryAccess() = default;

/// Simplify this access's sets w.r.t. the SCoP context and align their
/// parameter dimensions to the context's parameter space.
void MemoryAccess::realignParams() {
  isl::set Ctx = Statement->getParent()->getContext();
  InvalidDomain = InvalidDomain.gist_params(context: Ctx);
  AccessRelation = AccessRelation.gist_params(context: Ctx);

  // Predictable parameter order is required for JSON imports. Ensure alignment
  // by explicitly calling align_params.
  isl::space CtxSpace = Ctx.get_space();
  InvalidDomain = InvalidDomain.align_params(model: CtxSpace);
  AccessRelation = AccessRelation.align_params(model: CtxSpace);
}
912
913std::string MemoryAccess::getReductionOperatorStr() const {
914 return MemoryAccess::getReductionOperatorStr(RT: getReductionType());
915}
916
917isl::id MemoryAccess::getId() const { return Id; }
918
919raw_ostream &polly::operator<<(raw_ostream &OS,
920 MemoryAccess::ReductionType RT) {
921 switch (RT) {
922 case MemoryAccess::RT_NONE:
923 case MemoryAccess::RT_BOTTOM:
924 OS << "NONE";
925 break;
926 default:
927 OS << MemoryAccess::getReductionOperatorStr(RT);
928 break;
929 }
930 return OS;
931}
932
/// Print a human-readable description of this access: its type, reduction
/// type, scalar-ness and the (original and, if set, new) access relation.
void MemoryAccess::print(raw_ostream &OS) const {
  switch (AccType) {
  case READ:
    OS.indent(NumSpaces: 12) << "ReadAccess :=\t";
    break;
  case MUST_WRITE:
    OS.indent(NumSpaces: 12) << "MustWriteAccess :=\t";
    break;
  case MAY_WRITE:
    OS.indent(NumSpaces: 12) << "MayWriteAccess :=\t";
    break;
  }

  OS << "[Reduction Type: " << getReductionType() << "] ";

  OS << "[Scalar: " << isScalarKind() << "]\n";
  OS.indent(NumSpaces: 16) << getOriginalAccessRelationStr() << ";\n";
  if (hasNewAccessRelation())
    OS.indent(NumSpaces: 11) << "new: " << getNewAccessRelationStr() << ";\n";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemoryAccess::dump() const { print(OS&: errs()); }
#endif

/// Translate SCEV expression \p E into a piecewise affine function over this
/// access's statement domain, accumulating into InvalidDomain the statement
/// instances for which the translation is not valid.
isl::pw_aff MemoryAccess::getPwAff(const SCEV *E) {
  auto *Stmt = getStatement();
  PWACtx PWAC = Stmt->getParent()->getPwAff(E, BB: Stmt->getEntryBlock());
  isl::set StmtDom = getStatement()->getDomain();
  StmtDom = StmtDom.reset_tuple_id();
  isl::set NewInvalidDom = StmtDom.intersect(set2: PWAC.second);
  InvalidDomain = InvalidDomain.unite(set2: NewInvalidDom);
  return PWAC.first;
}
967
968// Create a map in the size of the provided set domain, that maps from the
969// one element of the provided set domain to another element of the provided
970// set domain.
971// The mapping is limited to all points that are equal in all but the last
972// dimension and for which the last dimension of the input is strict smaller
973// than the last dimension of the output.
974//
975// getEqualAndLarger(set[i0, i1, ..., iX]):
976//
977// set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
978// : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
979//
980static isl::map getEqualAndLarger(isl::space SetDomain) {
981 isl::space Space = SetDomain.map_from_set();
982 isl::map Map = isl::map::universe(space: Space);
983 unsigned lastDimension = Map.domain_tuple_dim().release() - 1;
984
985 // Set all but the last dimension to be equal for the input and output
986 //
987 // input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
988 // : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
989 for (unsigned i = 0; i < lastDimension; ++i)
990 Map = Map.equate(type1: isl::dim::in, pos1: i, type2: isl::dim::out, pos2: i);
991
992 // Set the last dimension of the input to be strict smaller than the
993 // last dimension of the output.
994 //
995 // input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
996 Map = Map.order_lt(type1: isl::dim::in, pos1: lastDimension, type2: isl::dim::out, pos2: lastDimension);
997 return Map;
998}
999
1000isl::set MemoryAccess::getStride(isl::map Schedule) const {
1001 isl::map AccessRelation = getAccessRelation();
1002 isl::space Space = Schedule.get_space().range();
1003 isl::map NextScatt = getEqualAndLarger(SetDomain: Space);
1004
1005 Schedule = Schedule.reverse();
1006 NextScatt = NextScatt.lexmin();
1007
1008 NextScatt = NextScatt.apply_range(map2: Schedule);
1009 NextScatt = NextScatt.apply_range(map2: AccessRelation);
1010 NextScatt = NextScatt.apply_domain(map2: Schedule);
1011 NextScatt = NextScatt.apply_domain(map2: AccessRelation);
1012
1013 isl::set Deltas = NextScatt.deltas();
1014 return Deltas;
1015}
1016
1017bool MemoryAccess::isStrideX(isl::map Schedule, int StrideWidth) const {
1018 isl::set Stride, StrideX;
1019 bool IsStrideX;
1020
1021 Stride = getStride(Schedule);
1022 StrideX = isl::set::universe(space: Stride.get_space());
1023 int Size = unsignedFromIslSize(Size: StrideX.tuple_dim());
1024 for (auto i : seq<int>(Begin: 0, End: Size - 1))
1025 StrideX = StrideX.fix_si(type: isl::dim::set, pos: i, value: 0);
1026 StrideX = StrideX.fix_si(type: isl::dim::set, pos: Size - 1, value: StrideWidth);
1027 IsStrideX = Stride.is_subset(set2: StrideX);
1028
1029 return IsStrideX;
1030}
1031
1032bool MemoryAccess::isStrideZero(isl::map Schedule) const {
1033 return isStrideX(Schedule, StrideWidth: 0);
1034}
1035
1036bool MemoryAccess::isStrideOne(isl::map Schedule) const {
1037 return isStrideX(Schedule, StrideWidth: 1);
1038}
1039
/// Overwrite the current (original) access relation.
void MemoryAccess::setAccessRelation(isl::map NewAccess) {
  AccessRelation = NewAccess;
}

/// Install \p NewAccess as the new access relation after validating (in
/// assert builds) that it is compatible with the statement domain and the
/// accessed array.
void MemoryAccess::setNewAccessRelation(isl::map NewAccess) {
  assert(!NewAccess.is_null());

#ifndef NDEBUG
  // Check domain space compatibility.
  isl::space NewSpace = NewAccess.get_space();
  isl::space NewDomainSpace = NewSpace.domain();
  isl::space OriginalDomainSpace = getStatement()->getDomainSpace();
  assert(OriginalDomainSpace.has_equal_tuples(NewDomainSpace));

  // Reads must be executed unconditionally. Writes might be executed in a
  // subdomain only.
  if (isRead()) {
    // Check whether there is an access for every statement instance.
    isl::set StmtDomain = getStatement()->getDomain();
    isl::set DefinedContext =
        getStatement()->getParent()->getBestKnownDefinedBehaviorContext();
    StmtDomain = StmtDomain.intersect_params(params: DefinedContext);
    isl::set NewDomain = NewAccess.domain();
    assert(!StmtDomain.is_subset(NewDomain).is_false() &&
           "Partial READ accesses not supported");
  }

  isl::space NewAccessSpace = NewAccess.get_space();
  assert(NewAccessSpace.has_tuple_id(isl::dim::set) &&
         "Must specify the array that is accessed");
  isl::id NewArrayId = NewAccessSpace.get_tuple_id(type: isl::dim::set);
  auto *SAI = static_cast<ScopArrayInfo *>(NewArrayId.get_user());
  assert(SAI && "Must set a ScopArrayInfo");

  if (SAI->isArrayKind() && SAI->getBasePtrOriginSAI()) {
    InvariantEquivClassTy *EqClass =
        getStatement()->getParent()->lookupInvariantEquivClass(
            Val: SAI->getBasePtr());
    assert(EqClass &&
           "Access functions to indirect arrays must have an invariant and "
           "hoisted base pointer");
  }

  // Check whether access dimensions correspond to number of dimensions of the
  // accesses array.
  unsigned Dims = SAI->getNumberOfDimensions();
  unsigned SpaceSize = unsignedFromIslSize(Size: NewAccessSpace.dim(type: isl::dim::set));
  assert(SpaceSize == Dims && "Access dims must match array dims");
#endif

  // Simplify the relation w.r.t. the SCoP context and statement domain before
  // storing it.
  NewAccess = NewAccess.gist_params(context: getStatement()->getParent()->getContext());
  NewAccess = NewAccess.gist_domain(context: getStatement()->getDomain());
  NewAccessRelation = NewAccess;
}
1094
1095bool MemoryAccess::isLatestPartialAccess() const {
1096 isl::set StmtDom = getStatement()->getDomain();
1097 isl::set AccDom = getLatestAccessRelation().domain();
1098
1099 return !StmtDom.is_subset(set2: AccDom);
1100}
1101
1102//===----------------------------------------------------------------------===//
1103
1104isl::map ScopStmt::getSchedule() const {
1105 isl::set Domain = getDomain();
1106 if (Domain.is_empty())
1107 return isl::map::from_aff(aff: isl::aff(isl::local_space(getDomainSpace())));
1108 auto Schedule = getParent()->getSchedule();
1109 if (Schedule.is_null())
1110 return {};
1111 Schedule = Schedule.intersect_domain(uset: isl::union_set(Domain));
1112 if (Schedule.is_empty())
1113 return isl::map::from_aff(aff: isl::aff(isl::local_space(getDomainSpace())));
1114 isl::map M = M.from_union_map(umap: Schedule);
1115 M = M.coalesce();
1116 M = M.gist_domain(context: Domain);
1117 M = M.coalesce();
1118 return M;
1119}
1120
/// Shrink this statement's domain to \p NewDomain, which must be contained in
/// the current domain.
void ScopStmt::restrictDomain(isl::set NewDomain) {
  assert(NewDomain.is_subset(Domain) &&
         "New domain is not a subset of old domain!");
  Domain = NewDomain;
}

/// Register \p Access with this statement, updating the per-kind lookup
/// tables. If \p Prepend is set, the access is placed at the front of the
/// access list instead of the back.
void ScopStmt::addAccess(MemoryAccess *Access, bool Prepend) {
  Instruction *AccessInst = Access->getAccessInstruction();

  if (Access->isArrayKind()) {
    MemoryAccessList &MAL = InstructionToAccess[AccessInst];
    MAL.emplace_front(args&: Access);
  } else if (Access->isValueKind() && Access->isWrite()) {
    Instruction *AccessVal = cast<Instruction>(Val: Access->getAccessValue());
    assert(!ValueWrites.lookup(AccessVal));

    ValueWrites[AccessVal] = Access;
  } else if (Access->isValueKind() && Access->isRead()) {
    Value *AccessVal = Access->getAccessValue();
    assert(!ValueReads.lookup(AccessVal));

    ValueReads[AccessVal] = Access;
  } else if (Access->isAnyPHIKind() && Access->isWrite()) {
    PHINode *PHI = cast<PHINode>(Val: Access->getAccessValue());
    assert(!PHIWrites.lookup(PHI));

    PHIWrites[PHI] = Access;
  } else if (Access->isAnyPHIKind() && Access->isRead()) {
    PHINode *PHI = cast<PHINode>(Val: Access->getAccessValue());
    assert(!PHIReads.lookup(PHI));

    PHIReads[PHI] = Access;
  }

  if (Prepend) {
    MemAccs.insert(I: MemAccs.begin(), Elt: Access);
    return;
  }
  MemAccs.push_back(Elt: Access);
}

/// Simplify this statement's sets w.r.t. the SCoP context and align their
/// parameter dimensions, including those of all memory accesses.
void ScopStmt::realignParams() {
  for (MemoryAccess *MA : *this)
    MA->realignParams();

  simplify(Set&: InvalidDomain);
  simplify(Set&: Domain);

  isl::set Ctx = Parent.getContext();
  InvalidDomain = InvalidDomain.gist_params(context: Ctx);
  Domain = Domain.gist_params(context: Ctx);

  // Predictable parameter order is required for JSON imports. Ensure alignment
  // by explicitly calling align_params.
  isl::space CtxSpace = Ctx.get_space();
  InvalidDomain = InvalidDomain.align_params(model: CtxSpace);
  Domain = Domain.align_params(model: CtxSpace);
}

/// Construct a region statement.
ScopStmt::ScopStmt(Scop &parent, Region &R, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> EntryBlockInstructions)
    : Parent(parent), InvalidDomain(), Domain(), R(&R), Build(), BaseName(Name),
      SurroundingLoop(SurroundingLoop), Instructions(EntryBlockInstructions) {}

/// Construct a basic-block statement.
ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> Instructions)
    : Parent(parent), InvalidDomain(), Domain(), BB(&bb), Build(),
      BaseName(Name), SurroundingLoop(SurroundingLoop),
      Instructions(Instructions) {}

/// Construct a copy statement that reads through \p SourceRel and writes
/// through \p TargetRel over the given domain.
ScopStmt::ScopStmt(Scop &parent, isl::map SourceRel, isl::map TargetRel,
                   isl::set NewDomain)
    : Parent(parent), InvalidDomain(), Domain(NewDomain), Build() {
  BaseName = getIslCompatibleName(Prefix: "CopyStmt_", Middle: "",
                                  Suffix: std::to_string(val: parent.getCopyStmtsNum()));
  isl::id Id = isl::id::alloc(ctx: getIslCtx(), name: getBaseName(), user: this);
  Domain = Domain.set_tuple_id(Id);
  TargetRel = TargetRel.set_tuple_id(type: isl::dim::in, id: Id);
  auto *Access =
      new MemoryAccess(this, MemoryAccess::AccessType::MUST_WRITE, TargetRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
  SourceRel = SourceRel.set_tuple_id(type: isl::dim::in, id: Id);
  Access = new MemoryAccess(this, MemoryAccess::AccessType::READ, SourceRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
}

ScopStmt::~ScopStmt() = default;

/// Return the domain as an isl-formatted string.
std::string ScopStmt::getDomainStr() const { return stringFromIslObj(Obj: Domain); }

/// Return the schedule as an isl-formatted string.
std::string ScopStmt::getScheduleStr() const {
  return stringFromIslObj(Obj: getSchedule());
}

/// Replace the set of iterations that are not modeled precisely.
void ScopStmt::setInvalidDomain(isl::set ID) { InvalidDomain = ID; }
1220
1221BasicBlock *ScopStmt::getEntryBlock() const {
1222 if (isBlockStmt())
1223 return getBasicBlock();
1224 return getRegion()->getEntry();
1225}
1226
/// Number of loop dimensions surrounding this statement inside the SCoP.
unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }

/// Unique, isl-compatible name of this statement.
const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }

/// Loop corresponding to the given domain dimension.
Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
  return NestLoops[Dimension];
}

isl::ctx ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }

isl::set ScopStmt::getDomain() const { return Domain; }

isl::space ScopStmt::getDomainSpace() const { return Domain.get_space(); }

isl::id ScopStmt::getDomainId() const { return Domain.get_tuple_id(); }

/// Print the statement's instruction list (for block statements, the entry
/// block instructions for region statements).
void ScopStmt::printInstructions(raw_ostream &OS) const {
  OS << "Instructions {\n";

  for (Instruction *Inst : Instructions)
    OS.indent(NumSpaces: 16) << *Inst << "\n";

  OS.indent(NumSpaces: 12) << "}\n";
}

/// Print a human-readable description of this statement: name, domain,
/// schedule, accesses and optionally the instruction list.
void ScopStmt::print(raw_ostream &OS, bool PrintInstructions) const {
  OS << "\t" << getBaseName() << "\n";
  OS.indent(NumSpaces: 12) << "Domain :=\n";

  if (!Domain.is_null()) {
    OS.indent(NumSpaces: 16) << getDomainStr() << ";\n";
  } else
    OS.indent(NumSpaces: 16) << "n/a\n";

  OS.indent(NumSpaces: 12) << "Schedule :=\n";

  if (!Domain.is_null()) {
    OS.indent(NumSpaces: 16) << getScheduleStr() << ";\n";
  } else
    OS.indent(NumSpaces: 16) << "n/a\n";

  for (MemoryAccess *Access : MemAccs)
    Access->print(OS);

  if (PrintInstructions)
    printInstructions(OS&: OS.indent(NumSpaces: 12));
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScopStmt::dump() const { print(OS&: dbgs(), PrintInstructions: true); }
#endif
1278
1279void ScopStmt::removeAccessData(MemoryAccess *MA) {
1280 if (MA->isRead() && MA->isOriginalValueKind()) {
1281 bool Found = ValueReads.erase(Val: MA->getAccessValue());
1282 (void)Found;
1283 assert(Found && "Expected access data not found");
1284 }
1285 if (MA->isWrite() && MA->isOriginalValueKind()) {
1286 bool Found = ValueWrites.erase(Val: cast<Instruction>(Val: MA->getAccessValue()));
1287 (void)Found;
1288 assert(Found && "Expected access data not found");
1289 }
1290 if (MA->isWrite() && MA->isOriginalAnyPHIKind()) {
1291 bool Found = PHIWrites.erase(Val: cast<PHINode>(Val: MA->getAccessInstruction()));
1292 (void)Found;
1293 assert(Found && "Expected access data not found");
1294 }
1295 if (MA->isRead() && MA->isOriginalAnyPHIKind()) {
1296 bool Found = PHIReads.erase(Val: cast<PHINode>(Val: MA->getAccessInstruction()));
1297 (void)Found;
1298 assert(Found && "Expected access data not found");
1299 }
1300}
1301
1302void ScopStmt::removeMemoryAccess(MemoryAccess *MA) {
1303 // Remove the memory accesses from this statement together with all scalar
1304 // accesses that were caused by it. MemoryKind::Value READs have no access
1305 // instruction, hence would not be removed by this function. However, it is
1306 // only used for invariant LoadInst accesses, its arguments are always affine,
1307 // hence synthesizable, and therefore there are no MemoryKind::Value READ
1308 // accesses to be removed.
1309 auto Predicate = [&](MemoryAccess *Acc) {
1310 return Acc->getAccessInstruction() == MA->getAccessInstruction();
1311 };
1312 for (auto *MA : MemAccs) {
1313 if (Predicate(MA)) {
1314 removeAccessData(MA);
1315 Parent.removeAccessData(Access: MA);
1316 }
1317 }
1318 llvm::erase_if(C&: MemAccs, P: Predicate);
1319 InstructionToAccess.erase(Val: MA->getAccessInstruction());
1320}
1321
/// Remove exactly one memory access from this statement. If \p AfterHoisting
/// is false (load hoisting in progress), only the instruction-to-access map is
/// updated; the access list and scalar tables are left untouched.
void ScopStmt::removeSingleMemoryAccess(MemoryAccess *MA, bool AfterHoisting) {
  if (AfterHoisting) {
    auto MAIt = std::find(first: MemAccs.begin(), last: MemAccs.end(), val: MA);
    assert(MAIt != MemAccs.end());
    MemAccs.erase(CI: MAIt);

    removeAccessData(MA);
    Parent.removeAccessData(Access: MA);
  }

  auto It = InstructionToAccess.find(Val: MA->getAccessInstruction());
  if (It != InstructionToAccess.end()) {
    It->second.remove(val: MA);
    if (It->second.empty())
      InstructionToAccess.erase(Val: MA->getAccessInstruction());
  }
}

/// Return the read access for value \p V in this statement, creating a
/// MemoryKind::Value READ access (and its ScopArrayInfo) on first use.
MemoryAccess *ScopStmt::ensureValueRead(Value *V) {
  MemoryAccess *Access = lookupInputAccessOf(Val: V);
  if (Access)
    return Access;

  ScopArrayInfo *SAI =
      Parent.getOrCreateScopArrayInfo(BasePtr: V, ElementType: V->getType(), Sizes: {}, Kind: MemoryKind::Value);
  Access = new MemoryAccess(this, nullptr, MemoryAccess::READ, V, V->getType(),
                            true, {}, {}, V, MemoryKind::Value);
  Parent.addAccessFunction(Access);
  Access->buildAccessRelation(SAI);
  addAccess(Access);
  Parent.addAccessData(Access);
  return Access;
}

raw_ostream &polly::operator<<(raw_ostream &OS, const ScopStmt &S) {
  S.print(OS, PrintInstructions: PollyPrintInstructions);
  return OS;
}
1360
1361//===----------------------------------------------------------------------===//
1362/// Scop class implement
1363
/// Replace the SCoP context, keeping the existing parameter order.
void Scop::setContext(isl::set NewContext) {
  Context = NewContext.align_params(model: Context.get_space());
}

namespace {

/// Remap parameter values but keep AddRecs valid wrt. invariant loads.
class SCEVSensitiveParameterRewriter final
    : public SCEVRewriteVisitor<SCEVSensitiveParameterRewriter> {
  const ValueToValueMap &VMap;

public:
  SCEVSensitiveParameterRewriter(const ValueToValueMap &VMap,
                                 ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), VMap(VMap) {}

  /// Rewrite \p E, replacing every SCEVUnknown found in \p VMap.
  static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
                             const ValueToValueMap &VMap) {
    SCEVSensitiveParameterRewriter SSPR(VMap, SE);
    return SSPR.visit(S: E);
  }

  // Split {Start,+,Step} into Start + {0,+,Step} so the (possibly remapped)
  // start expression stays outside the recurrence.
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    const SCEV *Start = visit(S: E->getStart());
    const SCEV *AddRec = SE.getAddRecExpr(Start: SE.getConstant(Ty: E->getType(), V: 0),
                                          Step: visit(S: E->getStepRecurrence(SE)),
                                          L: E->getLoop(), Flags: SCEV::FlagAnyWrap);
    return SE.getAddExpr(LHS: Start, RHS: AddRec);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {
    if (auto *NewValue = VMap.lookup(Val: E->getValue()))
      return SE.getUnknown(V: NewValue);
    return E;
  }
};

/// Check whether we should remap a SCEV expression.
class SCEVFindInsideScop : public SCEVTraversal<SCEVFindInsideScop> {
  const ValueToValueMap &VMap;
  bool FoundInside = false;
  const Scop *S;

public:
  SCEVFindInsideScop(const ValueToValueMap &VMap, ScalarEvolution &SE,
                     const Scop *S)
      : SCEVTraversal(*this), VMap(VMap), S(S) {}

  /// Return true iff \p E references a loop or an unmapped instruction inside
  /// the SCoP region.
  static bool hasVariant(const SCEV *E, ScalarEvolution &SE,
                         const ValueToValueMap &VMap, const Scop *S) {
    SCEVFindInsideScop SFIS(VMap, SE, S);
    SFIS.visitAll(Root: E);
    return SFIS.FoundInside;
  }

  bool follow(const SCEV *E) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Val: E)) {
      FoundInside |= S->getRegion().contains(L: AddRec->getLoop());
    } else if (auto *Unknown = dyn_cast<SCEVUnknown>(Val: E)) {
      if (Instruction *I = dyn_cast<Instruction>(Val: Unknown->getValue()))
        FoundInside |= S->getRegion().contains(Inst: I) && !VMap.count(Val: I);
    }
    return !FoundInside;
  }

  bool isDone() { return FoundInside; }
};
} // end anonymous namespace

const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *E) const {
  // Check whether it makes sense to rewrite the SCEV. (ScalarEvolution
  // doesn't like addition between an AddRec and an expression that
  // doesn't have a dominance relationship with it.)
  if (SCEVFindInsideScop::hasVariant(E, SE&: *SE, VMap: InvEquivClassVMap, S: this))
    return E;

  // Rewrite SCEV.
  return SCEVSensitiveParameterRewriter::rewrite(E, SE&: *SE, VMap: InvEquivClassVMap);
}
1443
1444void Scop::createParameterId(const SCEV *Parameter) {
1445 assert(Parameters.count(Parameter));
1446 assert(!ParameterIds.count(Parameter));
1447
1448 std::string ParameterName = "p_" + std::to_string(val: getNumParams() - 1);
1449
1450 if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Val: Parameter)) {
1451 Value *Val = ValueParameter->getValue();
1452
1453 if (UseInstructionNames) {
1454 // If this parameter references a specific Value and this value has a name
1455 // we use this name as it is likely to be unique and more useful than just
1456 // a number.
1457 if (Val->hasName())
1458 ParameterName = Val->getName().str();
1459 else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
1460 auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
1461 if (LoadOrigin->hasName()) {
1462 ParameterName += "_loaded_from_";
1463 ParameterName +=
1464 LI->getPointerOperand()->stripInBoundsOffsets()->getName();
1465 }
1466 }
1467 }
1468
1469 ParameterName = getIslCompatibleName(Prefix: "", Middle: ParameterName, Suffix: "");
1470 }
1471
1472 isl::id Id = isl::id::alloc(ctx: getIslCtx(), name: ParameterName,
1473 user: const_cast<void *>((const void *)Parameter));
1474 ParameterIds[Parameter] = Id;
1475}
1476
/// Add each of \p NewParameters to the SCoP's parameter set (after
/// normalization) and create an isl id for every parameter seen for the first
/// time.
void Scop::addParams(const ParameterSetTy &NewParameters) {
  for (const SCEV *Parameter : NewParameters) {
    // Normalize the SCEV to get the representing element for an invariant load.
    Parameter = extractConstantFactor(M: Parameter, SE&: *SE).second;
    Parameter = getRepresentingInvariantLoadSCEV(E: Parameter);

    if (Parameters.insert(X: Parameter))
      createParameterId(Parameter);
  }
}

/// Return the isl id registered for \p Parameter, or a null id if none exists.
isl::id Scop::getIdForParam(const SCEV *Parameter) const {
  // Normalize the SCEV to get the representing element for an invariant load.
  Parameter = getRepresentingInvariantLoadSCEV(E: Parameter);
  return ParameterIds.lookup(Val: Parameter);
}

bool Scop::isDominatedBy(const DominatorTree &DT, BasicBlock *BB) const {
  return DT.dominates(A: BB, B: getEntry());
}

/// Initialize all context sets to the trivial zero-parameter universe (the
/// invalid context starts empty).
void Scop::buildContext() {
  isl::space Space = isl::space::params_alloc(ctx: getIslCtx(), nparam: 0);
  Context = isl::set::universe(space: Space);
  InvalidContext = isl::set::empty(space: Space);
  AssumedContext = isl::set::universe(space: Space);
  DefinedBehaviorContext = isl::set::universe(space: Space);
}

/// Restrict the context by the signed value range ScalarEvolution knows for
/// each parameter.
void Scop::addParameterBounds() {
  unsigned PDim = 0;
  for (auto *Parameter : Parameters) {
    ConstantRange SRange = SE->getSignedRange(S: Parameter);
    Context = addRangeBoundsToSet(S: Context, Range: SRange, dim: PDim++, type: isl::dim::param);
  }
  intersectDefinedBehavior(Set: Context, Sign: AS_ASSUMPTION);
}

/// Bring every set in the SCoP into a common parameter space and add
/// parameter bounds. Skipped entirely when -polly-ignore-parameter-bounds is
/// set.
void Scop::realignParams() {
  if (PollyIgnoreParamBounds)
    return;

  // Add all parameters into a common model.
  isl::space Space = getFullParamSpace();

  // Align the parameters of all data structures to the model.
  Context = Context.align_params(model: Space);
  AssumedContext = AssumedContext.align_params(model: Space);
  InvalidContext = InvalidContext.align_params(model: Space);

  // As all parameters are known add bounds to them.
  addParameterBounds();

  for (ScopStmt &Stmt : *this)
    Stmt.realignParams();
  // Simplify the schedule according to the context too.
  Schedule = Schedule.gist_domain_params(context: getContext());

  // Predictable parameter order is required for JSON imports. Ensure alignment
  // by explicitly calling align_params.
  Schedule = Schedule.align_params(space: Space);
}

static isl::set simplifyAssumptionContext(isl::set AssumptionContext,
                                          const Scop &S) {
  // If we have modeled all blocks in the SCoP that have side effects we can
  // simplify the context with the constraints that are needed for anything to
  // be executed at all. However, if we have error blocks in the SCoP we already
  // assumed some parameter combinations cannot occur and removed them from the
  // domains, thus we cannot use the remaining domain to simplify the
  // assumptions.
  if (!S.hasErrorBlock()) {
    auto DomainParameters = S.getDomains().params();
    AssumptionContext = AssumptionContext.gist_params(context: DomainParameters);
  }

  AssumptionContext = AssumptionContext.gist_params(context: S.getContext());
  return AssumptionContext;
}

void Scop::simplifyContexts() {
  // The parameter constraints of the iteration domains give us a set of
  // constraints that need to hold for all cases where at least a single
  // statement iteration is executed in the whole scop. We now simplify the
  // assumed context under the assumption that such constraints hold and at
  // least a single statement iteration is executed. For cases where no
  // statement instances are executed, the assumptions we have taken about
  // the executed code do not matter and can be changed.
  //
  // WARNING: This only holds if the assumptions we have taken do not reduce
  //          the set of statement instances that are executed. Otherwise we
  //          may run into a case where the iteration domains suggest that
  //          for a certain set of parameter constraints no code is executed,
  //          but in the original program some computation would have been
  //          performed. In such a case, modifying the run-time conditions and
  //          possibly influencing the run-time check may cause certain scops
  //          to not be executed.
  //
  // Example:
  //
  //   When delinearizing the following code:
  //
  //     for (long i = 0; i < 100; i++)
  //       for (long j = 0; j < m; j++)
  //         A[i+p][j] = 1.0;
  //
  //   we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
  //   otherwise we would access out of bound data. Now, knowing that code is
  //   only executed for the case m >= 0, it is sufficient to assume p >= 0.
  AssumedContext = simplifyAssumptionContext(AssumptionContext: AssumedContext, S: *this);
  InvalidContext = InvalidContext.align_params(model: getParamSpace());
  simplify(Set&: DefinedBehaviorContext);
  DefinedBehaviorContext = DefinedBehaviorContext.align_params(model: getParamSpace());
}

isl::set Scop::getDomainConditions(const ScopStmt *Stmt) const {
  return getDomainConditions(BB: Stmt->getEntryBlock());
}

/// Return the domain conditions for \p BB, falling back to the innermost
/// enclosing region whose entry has a recorded domain.
isl::set Scop::getDomainConditions(BasicBlock *BB) const {
  auto DIt = DomainMap.find(Val: BB);
  if (DIt != DomainMap.end())
    return DIt->getSecond();

  auto &RI = *R.getRegionInfo();
  auto *BBR = RI.getRegionFor(BB);
  while (BBR->getEntry() == BB)
    BBR = BBR->getParent();
  return getDomainConditions(BB: BBR->getEntry());
}
1607
// Construct a SCoP for region @p R.
//
// Each Scop allocates its own isl_ctx (released via isl_ctx_free when the
// Scop is destroyed). The constructor applies Polly's ISL option defaults,
// forwards any -polly-isl-arg command-line options to ISL, and then builds
// the initial context.
Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
           DominatorTree &DT, ScopDetection::DetectionContext &DC,
           OptimizationRemarkEmitter &ORE, int ID)
    : IslCtx(isl_ctx_alloc(), isl_ctx_free), SE(&ScalarEvolution), DT(&DT),
      R(R), name(std::nullopt), HasSingleExitEdge(R.getExitingBlock()), DC(DC),
      ORE(ORE), Affinator(this, LI), ID(ID) {

  // Options defaults that are different from ISL's.
  isl_options_set_schedule_serialize_sccs(ctx: IslCtx.get(), val: true);

  SmallVector<char *, 8> IslArgv;
  IslArgv.reserve(N: 1 + IslArgs.size());

  // Substitute for program name.
  IslArgv.push_back(Elt: const_cast<char *>("-polly-isl-arg"));

  for (std::string &Arg : IslArgs)
    IslArgv.push_back(Elt: const_cast<char *>(Arg.c_str()));

  // Abort if unknown argument is passed.
  // Note that "-V" (print isl version) will always call exit(0), so we cannot
  // avoid ISL aborting the program at this point.
  unsigned IslParseFlags = ISL_ARG_ALL;

  isl_ctx_parse_options(ctx: IslCtx.get(), argc: IslArgv.size(), argv: IslArgv.data(),
                        flags: IslParseFlags);

  if (IslOnErrorAbort)
    isl_options_set_on_error(ctx: getIslCtx().get(), ISL_ON_ERROR_ABORT);
  buildContext();
}
1639
// Out-of-line defaulted destructor (keeps member types incomplete in header).
Scop::~Scop() = default;
1641
// Remove all entries of StmtMap and InstStmtMap that refer to @p Stmt.
//
// For region statements, every block of the region is unmapped; for block
// statements, only this particular statement is removed from the block's
// statement list, since several statements may share a basic block.
void Scop::removeFromStmtMap(ScopStmt &Stmt) {
  for (Instruction *Inst : Stmt.getInstructions())
    InstStmtMap.erase(Val: Inst);

  if (Stmt.isRegionStmt()) {
    for (BasicBlock *BB : Stmt.getRegion()->blocks()) {
      StmtMap.erase(Val: BB);
      // Skip entry basic block, as its instructions are already deleted as
      // part of the statement's instruction list.
      if (BB == Stmt.getEntryBlock())
        continue;
      for (Instruction &Inst : *BB)
        InstStmtMap.erase(Val: &Inst);
    }
  } else {
    auto StmtMapIt = StmtMap.find(Val: Stmt.getBasicBlock());
    if (StmtMapIt != StmtMap.end())
      llvm::erase(C&: StmtMapIt->second, V: &Stmt);
    for (Instruction *Inst : Stmt.getInstructions())
      InstStmtMap.erase(Val: Inst);
  }
}
1664
// Remove all statements matching @p ShouldDelete from this SCoP, including
// their memory accesses and their entries in StmtMap/InstStmtMap.
//
// @param ShouldDelete Predicate selecting the statements to remove.
// @param AfterHoisting Forwarded to removeSingleMemoryAccess.
void Scop::removeStmts(function_ref<bool(ScopStmt &)> ShouldDelete,
                       bool AfterHoisting) {
  for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
    if (!ShouldDelete(*StmtIt)) {
      StmtIt++;
      continue;
    }

    // Start with removing all of the statement's accesses including erasing it
    // from all maps that are pointing to them.
    // Make a temporary copy because removing MAs invalidates the iterator.
    SmallVector<MemoryAccess *, 16> MAList(StmtIt->begin(), StmtIt->end());
    for (MemoryAccess *MA : MAList)
      StmtIt->removeSingleMemoryAccess(MA, AfterHoisting);

    removeFromStmtMap(Stmt&: *StmtIt);
    StmtIt = Stmts.erase(position: StmtIt);
  }
}
1684
1685void Scop::removeStmtNotInDomainMap() {
1686 removeStmts(ShouldDelete: [this](ScopStmt &Stmt) -> bool {
1687 isl::set Domain = DomainMap.lookup(Val: Stmt.getEntryBlock());
1688 if (Domain.is_null())
1689 return true;
1690 return Domain.is_empty();
1691 });
1692}
1693
// Drop statements that cannot influence the transformed code: statements
// without accesses and, after invariant load hoisting, statements that only
// read memory. Statements calling debug functions are always kept.
void Scop::simplifySCoP(bool AfterHoisting) {
  removeStmts(
      ShouldDelete: [AfterHoisting](ScopStmt &Stmt) -> bool {
        // Never delete statements that contain calls to debug functions.
        if (hasDebugCall(Stmt: &Stmt))
          return false;

        bool RemoveStmt = Stmt.isEmpty();

        // Remove read only statements only after invariant load hoisting.
        if (!RemoveStmt && AfterHoisting) {
          // Check whether all accesses of this statement are reads.
          bool OnlyRead = true;
          for (MemoryAccess *MA : Stmt) {
            if (MA->isRead())
              continue;

            OnlyRead = false;
            break;
          }

          RemoveStmt = OnlyRead;
        }
        return RemoveStmt;
      },
      AfterHoisting);
}
1720
// Return the invariant-load equivalence class containing @p Val, or nullptr
// if @p Val is not a load that belongs to any class.
//
// A class matches when its identifying pointer SCEV and access type equal
// those of the (possibly representative-mapped) load, and @p Val appears
// among the class's invariant accesses.
InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
  LoadInst *LInst = dyn_cast<LoadInst>(Val);
  if (!LInst)
    return nullptr;

  // If the load has a representative in InvEquivClassVMap, classify via the
  // representative load instead.
  if (Value *Rep = InvEquivClassVMap.lookup(Val: LInst))
    LInst = cast<LoadInst>(Val: Rep);

  Type *Ty = LInst->getType();
  const SCEV *PointerSCEV = SE->getSCEV(V: LInst->getPointerOperand());
  for (auto &IAClass : InvariantEquivClasses) {
    if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
      continue;

    auto &MAs = IAClass.InvariantAccesses;
    for (auto *MA : MAs)
      if (MA->getAccessInstruction() == Val)
        return &IAClass;
  }

  return nullptr;
}
1743
// Return the ScopArrayInfo for (@p BasePtr, @p Kind), creating it on first
// use. Exactly one of @p BasePtr and @p BaseName must be non-null; named
// arrays are kept in ScopArrayNameMap, pointer-based ones in
// ScopArrayInfoMap.
//
// For an already existing array the element type and dimension sizes are
// updated; mismatching sizes invalidate the SCoP with a DELINEARIZATION
// assumption.
ScopArrayInfo *Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
                                              ArrayRef<const SCEV *> Sizes,
                                              MemoryKind Kind,
                                              const char *BaseName) {
  assert((BasePtr || BaseName) &&
         "BasePtr and BaseName can not be nullptr at the same time.");
  assert(!(BasePtr && BaseName) && "BaseName is redundant.");
  auto &SAI = BasePtr ? ScopArrayInfoMap[std::make_pair(x&: BasePtr, y&: Kind)]
                      : ScopArrayNameMap[BaseName];
  if (!SAI) {
    auto &DL = getFunction().getParent()->getDataLayout();
    SAI.reset(p: new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
                                DL, this, BaseName));
    ScopArrayInfoSet.insert(X: SAI.get());
  } else {
    SAI->updateElementType(NewElementType: ElementType);
    // In case of mismatching array sizes, we bail out by setting the run-time
    // context to false.
    if (!SAI->updateSizes(NewSizes: Sizes))
      invalidate(Kind: DELINEARIZATION, Loc: DebugLoc());
  }
  return SAI.get();
}
1767
1768ScopArrayInfo *Scop::createScopArrayInfo(Type *ElementType,
1769 const std::string &BaseName,
1770 const std::vector<unsigned> &Sizes) {
1771 auto *DimSizeType = Type::getInt64Ty(C&: getSE()->getContext());
1772 std::vector<const SCEV *> SCEVSizes;
1773
1774 for (auto size : Sizes)
1775 if (size)
1776 SCEVSizes.push_back(x: getSE()->getConstant(Ty: DimSizeType, V: size, isSigned: false));
1777 else
1778 SCEVSizes.push_back(x: nullptr);
1779
1780 auto *SAI = getOrCreateScopArrayInfo(BasePtr: nullptr, ElementType, Sizes: SCEVSizes,
1781 Kind: MemoryKind::Array, BaseName: BaseName.c_str());
1782 return SAI;
1783}
1784
1785ScopArrayInfo *Scop::getScopArrayInfoOrNull(Value *BasePtr, MemoryKind Kind) {
1786 auto *SAI = ScopArrayInfoMap[std::make_pair(x&: BasePtr, y&: Kind)].get();
1787 return SAI;
1788}
1789
// Like getScopArrayInfoOrNull(), but the array is required to exist.
ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr, MemoryKind Kind) {
  auto *SAI = getScopArrayInfoOrNull(BasePtr, Kind);
  assert(SAI && "No ScopArrayInfo available for this base pointer");
  return SAI;
}
1795
1796std::string Scop::getContextStr() const {
1797 return stringFromIslObj(Obj: getContext());
1798}
1799
1800std::string Scop::getAssumedContextStr() const {
1801 assert(!AssumedContext.is_null() && "Assumed context not yet built");
1802 return stringFromIslObj(Obj: AssumedContext);
1803}
1804
1805std::string Scop::getInvalidContextStr() const {
1806 return stringFromIslObj(Obj: InvalidContext);
1807}
1808
1809std::string Scop::getNameStr() const {
1810 std::string ExitName, EntryName;
1811 std::tie(args&: EntryName, args&: ExitName) = getEntryExitStr();
1812 return EntryName + "---" + ExitName;
1813}
1814
// Return printable names for this SCoP's entry and exit blocks as the pair
// (EntryName, ExitName). If the region has no exit block, the fixed name
// "FunctionExit" is used.
std::pair<std::string, std::string> Scop::getEntryExitStr() const {
  std::string ExitName, EntryName;
  raw_string_ostream ExitStr(ExitName);
  raw_string_ostream EntryStr(EntryName);

  R.getEntry()->printAsOperand(O&: EntryStr, PrintType: false);

  if (R.getExit()) {
    R.getExit()->printAsOperand(O&: ExitStr, PrintType: false);
  } else
    ExitName = "FunctionExit";

  return std::make_pair(x&: EntryName, y&: ExitName);
}
1829
// Return the parameter constraints under which this SCoP is modeled.
isl::set Scop::getContext() const { return Context; }
1831
// Return the parameter space underlying this SCoP's context set.
isl::space Scop::getParamSpace() const { return getContext().get_space(); }
1833
1834isl::space Scop::getFullParamSpace() const {
1835
1836 isl::space Space = isl::space::params_alloc(ctx: getIslCtx(), nparam: ParameterIds.size());
1837
1838 unsigned PDim = 0;
1839 for (const SCEV *Parameter : Parameters) {
1840 isl::id Id = getIdForParam(Parameter);
1841 Space = Space.set_dim_id(type: isl::dim::param, pos: PDim++, id: Id);
1842 }
1843
1844 return Space;
1845}
1846
// Return the set of parameter constraints assumed for this SCoP. May only
// be queried once the assumed context has been built.
isl::set Scop::getAssumedContext() const {
  assert(!AssumedContext.is_null() && "Assumed context not yet built");
  return AssumedContext;
}
1851
// Heuristically decide whether optimizing this SCoP is likely profitable.
//
// With -polly-process-unprofitable everything counts as profitable. Empty
// SCoPs never are. Otherwise we sum the loop dimensions of statements that
// are nested in at least one loop and considered optimizable; when
// @p ScalarsAreUnprofitable is set, a statement only counts if its writes
// touch arrays and no scalars. More than one counted dimension is required.
bool Scop::isProfitable(bool ScalarsAreUnprofitable) const {
  if (PollyProcessUnprofitable)
    return true;

  if (isEmpty())
    return false;

  unsigned OptimizableStmtsOrLoops = 0;
  for (auto &Stmt : *this) {
    // Statements outside any loop contribute nothing.
    if (Stmt.getNumIterators() == 0)
      continue;

    // Classify the statement's write accesses (reads are ignored).
    bool ContainsArrayAccs = false;
    bool ContainsScalarAccs = false;
    for (auto *MA : Stmt) {
      if (MA->isRead())
        continue;
      ContainsArrayAccs |= MA->isLatestArrayKind();
      ContainsScalarAccs |= MA->isLatestScalarKind();
    }

    if (!ScalarsAreUnprofitable || (ContainsArrayAccs && !ContainsScalarAccs))
      OptimizableStmtsOrLoops += Stmt.getNumIterators();
  }

  return OptimizableStmtsOrLoops > 1;
}
1879
// Return true if a satisfiable run-time check exists for this SCoP, i.e.
// the assumed context — restricted to the context and the parameters of the
// statement domains — is non-empty and not fully contained in the invalid
// context.
bool Scop::hasFeasibleRuntimeContext() const {
  // A SCoP without statements cannot be executed.
  if (Stmts.empty())
    return false;

  isl::set PositiveContext = getAssumedContext();
  isl::set NegativeContext = getInvalidContext();
  PositiveContext = PositiveContext.intersect_params(params: Context);
  PositiveContext = PositiveContext.intersect_params(params: getDomains().params());
  return PositiveContext.is_empty().is_false() &&
         PositiveContext.is_subset(set2: NegativeContext).is_false();
}
1891
1892MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
1893 Value *PointerBase = MA->getOriginalBaseAddr();
1894
1895 auto *PointerBaseInst = dyn_cast<Instruction>(Val: PointerBase);
1896 if (!PointerBaseInst)
1897 return nullptr;
1898
1899 auto *BasePtrStmt = getStmtFor(Inst: PointerBaseInst);
1900 if (!BasePtrStmt)
1901 return nullptr;
1902
1903 return BasePtrStmt->getArrayAccessOrNULLFor(Inst: PointerBaseInst);
1904}
1905
// Map an AssumptionKind to the human-readable label used in remarks.
static std::string toString(AssumptionKind Kind) {
  switch (Kind) {
  case ALIASING:
    return "No-aliasing";
  case INBOUNDS:
    return "Inbounds";
  case WRAPPING:
    return "No-overflows";
  case UNSIGNED:
    return "Signed-unsigned";
  case COMPLEXITY:
    return "Low complexity";
  case PROFITABLE:
    return "Profitable";
  case ERRORBLOCK:
    return "No-error";
  case INFINITELOOP:
    return "Finite loop";
  case INVARIANTLOAD:
    return "Invariant load";
  case DELINEARIZATION:
    return "Delinearization";
  }
  llvm_unreachable("Unknown AssumptionKind!");
}
1931
// Check whether @p Set, taken with sign @p Sign, would actually tighten the
// current contexts.
//
// An assumption is ineffective if the context or the assumed context is
// already contained in it; a restriction is ineffective if it is disjoint
// from the context or already covered by the invalid context.
bool Scop::isEffectiveAssumption(isl::set Set, AssumptionSign Sign) {
  if (Sign == AS_ASSUMPTION) {
    if (Context.is_subset(set2: Set))
      return false;

    if (AssumedContext.is_subset(set2: Set))
      return false;
  } else {
    if (Set.is_disjoint(set2: Context))
      return false;

    if (Set.is_subset(set2: InvalidContext))
      return false;
  }
  return true;
}
1948
// Update the per-kind statistics and emit an optimization remark for the
// given assumption (@p Sign == AS_ASSUMPTION) or restriction.
//
// Returns true if the condition should be merged into the contexts; false
// when it was dropped as trivial, or — with -polly-remarks-minimal — as
// ineffective.
bool Scop::trackAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                           AssumptionSign Sign, BasicBlock *BB) {
  if (PollyRemarksMinimal && !isEffectiveAssumption(Set, Sign))
    return false;

  // Do never emit trivial assumptions as they only clutter the output.
  if (!PollyRemarksMinimal) {
    isl::set Univ;
    if (Sign == AS_ASSUMPTION)
      Univ = isl::set::universe(space: Set.get_space());

    // A restriction is trivial if empty; an assumption if it is the universe.
    bool IsTrivial = (Sign == AS_RESTRICTION && Set.is_empty()) ||
                     (Sign == AS_ASSUMPTION && Univ.is_equal(set2: Set));

    if (IsTrivial)
      return false;
  }

  // Bump the statistics counter matching the assumption kind.
  switch (Kind) {
  case ALIASING:
    AssumptionsAliasing++;
    break;
  case INBOUNDS:
    AssumptionsInbounds++;
    break;
  case WRAPPING:
    AssumptionsWrapping++;
    break;
  case UNSIGNED:
    AssumptionsUnsigned++;
    break;
  case COMPLEXITY:
    AssumptionsComplexity++;
    break;
  case PROFITABLE:
    AssumptionsUnprofitable++;
    break;
  case ERRORBLOCK:
    AssumptionsErrorBlock++;
    break;
  case INFINITELOOP:
    AssumptionsInfiniteLoop++;
    break;
  case INVARIANTLOAD:
    AssumptionsInvariantLoad++;
    break;
  case DELINEARIZATION:
    AssumptionsDelinearization++;
    break;
  }

  // Report the assumption at the given block if available, otherwise at the
  // region entry.
  auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
  std::string Msg = toString(Kind) + Suffix + stringFromIslObj(Obj: Set);
  if (BB)
    ORE.emit(OptDiag: OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc, BB)
             << Msg);
  else
    ORE.emit(OptDiag: OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc,
                                    R.getEntry())
             << Msg);
  return true;
}
2011
// Add an assumption (@p Sign == AS_ASSUMPTION) or restriction over the
// parameter values to this SCoP.
//
// The set is first simplified relative to the known context and folded into
// the defined-behavior context. If @p RequiresRTC is false, the condition
// only feeds defined-behavior tracking; otherwise it is merged into
// AssumedContext/InvalidContext after being reported via trackAssumption.
void Scop::addAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                         AssumptionSign Sign, BasicBlock *BB,
                         bool RequiresRTC) {
  // Simplify the assumptions/restrictions first.
  Set = Set.gist_params(context: getContext());
  intersectDefinedBehavior(Set, Sign);

  if (!RequiresRTC)
    return;

  if (!trackAssumption(Kind, Set, Loc, Sign, BB))
    return;

  if (Sign == AS_ASSUMPTION)
    AssumedContext = AssumedContext.intersect(set2: Set).coalesce();
  else
    InvalidContext = InvalidContext.unite(set2: Set).coalesce();
}
2030
// Fold @p Set into the defined-behavior context: intersect for assumptions,
// subtract for restrictions. If tracking was already abandoned (null
// context) this is a no-op; if the context grows past the disjunct limit
// even after simplification, tracking is abandoned.
void Scop::intersectDefinedBehavior(isl::set Set, AssumptionSign Sign) {
  if (DefinedBehaviorContext.is_null())
    return;

  if (Sign == AS_ASSUMPTION)
    DefinedBehaviorContext = DefinedBehaviorContext.intersect(set2: Set);
  else
    DefinedBehaviorContext = DefinedBehaviorContext.subtract(set2: Set);

  // Limit the complexity of the context. If complexity is exceeded, simplify
  // the set and check again.
  if (DefinedBehaviorContext.n_basic_set().release() >
      MaxDisjunktsInDefinedBehaviourContext) {
    simplify(Set&: DefinedBehaviorContext);
    if (DefinedBehaviorContext.n_basic_set().release() >
        MaxDisjunktsInDefinedBehaviourContext)
      DefinedBehaviorContext = {};
  }
}
2050
// Mark this SCoP as infeasible by adding the empty set as an assumption,
// recording @p Kind as the reason.
void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
  POLLY_DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
  addAssumption(Kind, Set: isl::set::empty(space: getParamSpace()), Loc, Sign: AS_ASSUMPTION, BB);
}
2055
// Return the parameter constraints under which this SCoP is invalid.
isl::set Scop::getInvalidContext() const { return InvalidContext; }
2057
// Print the context, assumed/invalid/defined-behavior contexts and the
// parameter list of this SCoP to @p OS.
void Scop::printContext(raw_ostream &OS) const {
  OS << "Context:\n";
  OS.indent(NumSpaces: 4) << Context << "\n";

  OS.indent(NumSpaces: 4) << "Assumed Context:\n";
  OS.indent(NumSpaces: 4) << AssumedContext << "\n";

  OS.indent(NumSpaces: 4) << "Invalid Context:\n";
  OS.indent(NumSpaces: 4) << InvalidContext << "\n";

  // The defined-behavior context may have been abandoned (null).
  OS.indent(NumSpaces: 4) << "Defined Behavior Context:\n";
  if (!DefinedBehaviorContext.is_null())
    OS.indent(NumSpaces: 4) << DefinedBehaviorContext << "\n";
  else
    OS.indent(NumSpaces: 4) << "<unavailable>\n";

  unsigned Dim = 0;
  for (const SCEV *Parameter : Parameters)
    OS.indent(NumSpaces: 4) << "p" << Dim++ << ": " << *Parameter << "\n";
}
2078
// Print the alias groups (min/max access ranges) of this SCoP. A group
// without read-only accesses is printed as one line of its non-read-only
// accesses; otherwise one line per read-only access is printed, paired with
// all non-read-only accesses of the group.
void Scop::printAliasAssumptions(raw_ostream &OS) const {
  // Count one entry per read-only access, or one for the whole group when it
  // has none.
  int noOfGroups = 0;
  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
    if (Pair.second.size() == 0)
      noOfGroups += 1;
    else
      noOfGroups += Pair.second.size();
  }

  OS.indent(NumSpaces: 4) << "Alias Groups (" << noOfGroups << "):\n";
  if (MinMaxAliasGroups.empty()) {
    OS.indent(NumSpaces: 8) << "n/a\n";
    return;
  }

  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {

    // If the group has no read only accesses print the write accesses.
    if (Pair.second.empty()) {
      OS.indent(NumSpaces: 8) << "[[";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }

    for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
      OS.indent(NumSpaces: 8) << "[[";
      OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }
  }
}
2117
2118void Scop::printStatements(raw_ostream &OS, bool PrintInstructions) const {
2119 OS << "Statements {\n";
2120
2121 for (const ScopStmt &Stmt : *this) {
2122 OS.indent(NumSpaces: 4);
2123 Stmt.print(OS, PrintInstructions);
2124 }
2125
2126 OS.indent(NumSpaces: 4) << "}\n";
2127}
2128
// Print all arrays of this SCoP, once in the default form and once with the
// dimension bounds printed as piecewise-affine functions.
void Scop::printArrayInfo(raw_ostream &OS) const {
  OS << "Arrays {\n";

  for (auto &Array : arrays())
    Array->print(OS);

  OS.indent(NumSpaces: 4) << "}\n";

  OS.indent(NumSpaces: 4) << "Arrays (Bounds as pw_affs) {\n";

  for (auto &Array : arrays())
    Array->print(OS, /* SizeAsPwAff */ true);

  OS.indent(NumSpaces: 4) << "}\n";
}
2144
// Print a full textual description of this SCoP: function/region header,
// invariant accesses, contexts, arrays, alias groups and statements.
void Scop::print(raw_ostream &OS, bool PrintInstructions) const {
  OS.indent(NumSpaces: 4) << "Function: " << getFunction().getName() << "\n";
  OS.indent(NumSpaces: 4) << "Region: " << getNameStr() << "\n";
  OS.indent(NumSpaces: 4) << "Max Loop Depth: " << getMaxLoopDepth() << "\n";
  OS.indent(NumSpaces: 4) << "Invariant Accesses: {\n";
  for (const auto &IAClass : InvariantEquivClasses) {
    const auto &MAs = IAClass.InvariantAccesses;
    // Classes without accesses only identify a pointer; others print their
    // first access plus the execution context.
    if (MAs.empty()) {
      OS.indent(NumSpaces: 12) << "Class Pointer: " << *IAClass.IdentifyingPointer << "\n";
    } else {
      MAs.front()->print(OS);
      OS.indent(NumSpaces: 12) << "Execution Context: " << IAClass.ExecutionContext
                    << "\n";
    }
  }
  OS.indent(NumSpaces: 4) << "}\n";
  printContext(OS&: OS.indent(NumSpaces: 4));
  printArrayInfo(OS&: OS.indent(NumSpaces: 4));
  printAliasAssumptions(OS);
  printStatements(OS&: OS.indent(NumSpaces: 4), PrintInstructions);
}
2166
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print the SCoP (including instructions) to dbgs().
LLVM_DUMP_METHOD void Scop::dump() const { print(OS&: dbgs(), PrintInstructions: true); }
#endif
2170
// Return the isl context owned by this SCoP.
isl::ctx Scop::getIslCtx() const { return IslCtx.get(); }
2172
// Translate the SCEV @p E (in the context of @p BB) into a piecewise-affine
// function together with its invalid domain. On failure the SCoP is
// invalidated with a COMPLEXITY assumption and a zero function is returned.
__isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
                                 bool NonNegative,
                                 RecordedAssumptionsTy *RecordedAssumptions) {
  // First try to use the SCEVAffinator to generate a piecewise defined
  // affine function from @p E in the context of @p BB. If that tasks becomes to
  // complex the affinator might return a nullptr. In such a case we invalidate
  // the SCoP and return a dummy value. This way we do not need to add error
  // handling code to all users of this function.
  auto PWAC = Affinator.getPwAff(E, BB, RecordedAssumptions);
  if (!PWAC.first.is_null()) {
    // TODO: We could use a heuristic and either use:
    //         SCEVAffinator::takeNonNegativeAssumption
    //       or
    //         SCEVAffinator::interpretAsUnsigned
    //       to deal with unsigned or "NonNegative" SCEVs.
    if (NonNegative)
      Affinator.takeNonNegativeAssumption(PWAC, RecordedAssumptions);
    return PWAC;
  }

  auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
  invalidate(Kind: COMPLEXITY, Loc: DL, BB);
  return Affinator.getPwAff(E: SE->getZero(Ty: E->getType()), BB, RecordedAssumptions);
}
2197
// Collect the domains of all statements of this SCoP into one union_set.
isl::union_set Scop::getDomains() const {
  isl_space *EmptySpace = isl_space_params_alloc(ctx: getIslCtx().get(), nparam: 0);
  isl_union_set *Domain = isl_union_set_empty(space: EmptySpace);

  for (const ScopStmt &Stmt : *this)
    Domain = isl_union_set_add_set(uset: Domain, set: Stmt.getDomain().release());

  return isl::manage(ptr: Domain);
}
2207
2208isl::pw_aff Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB,
2209 RecordedAssumptionsTy *RecordedAssumptions) {
2210 PWACtx PWAC = getPwAff(E, BB, NonNegative: RecordedAssumptions);
2211 return PWAC.first;
2212}
2213
// Collect the access relations of all memory accesses matching @p Predicate,
// each restricted to its statement's domain, as one coalesced union_map.
isl::union_map
Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
  isl::union_map Accesses = isl::union_map::empty(ctx: getIslCtx());

  for (ScopStmt &Stmt : *this) {
    for (MemoryAccess *MA : Stmt) {
      if (!Predicate(*MA))
        continue;

      // Restrict the access relation to the iterations actually executed.
      isl::set Domain = Stmt.getDomain();
      isl::map AccessDomain = MA->getAccessRelation();
      AccessDomain = AccessDomain.intersect_domain(set: Domain);
      Accesses = Accesses.unite(umap2: AccessDomain);
    }
  }

  return Accesses.coalesce();
}
2232
// Return all must-write access relations, restricted to statement domains.
isl::union_map Scop::getMustWrites() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return MA.isMustWrite(); });
}
2236
// Return all may-write access relations, restricted to statement domains.
isl::union_map Scop::getMayWrites() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return MA.isMayWrite(); });
}
2240
// Return all write access relations, restricted to statement domains.
isl::union_map Scop::getWrites() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return MA.isWrite(); });
}
2244
// Return all read access relations, restricted to statement domains.
isl::union_map Scop::getReads() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return MA.isRead(); });
}
2248
// Return all access relations of the SCoP, restricted to statement domains.
isl::union_map Scop::getAccesses() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return true; });
}
2252
// Return all access relations touching @p Array, restricted to statement
// domains.
isl::union_map Scop::getAccesses(ScopArrayInfo *Array) {
  return getAccessesOfType(
      Predicate: [Array](MemoryAccess &MA) { return MA.getScopArrayInfo() == Array; });
}
2257
2258isl::union_map Scop::getSchedule() const {
2259 auto Tree = getScheduleTree();
2260 return Tree.get_map();
2261}
2262
// Return the schedule tree with the current statement domains applied.
isl::schedule Scop::getScheduleTree() const {
  return Schedule.intersect_domain(domain: getDomains());
}
2266
// Replace the schedule with the flat map @p NewSchedule. The map is wrapped
// into a schedule tree consisting of a single partial schedule over the
// current domains, and the SCoP is marked as having a modified schedule.
void Scop::setSchedule(isl::union_map NewSchedule) {
  auto S = isl::schedule::from_domain(domain: getDomains());
  Schedule = S.insert_partial_schedule(
      partial: isl::multi_union_pw_aff::from_union_map(umap: NewSchedule));
  ScheduleModified = true;
}
2273
2274void Scop::setScheduleTree(isl::schedule NewSchedule) {
2275 Schedule = NewSchedule;
2276 ScheduleModified = true;
2277}
2278
// Intersect every statement's domain with @p Domain. Returns true if any
// domain actually shrank. Statements whose intersection becomes empty are
// given an explicit empty set in their original domain space.
bool Scop::restrictDomains(isl::union_set Domain) {
  bool Changed = false;
  for (ScopStmt &Stmt : *this) {
    isl::union_set StmtDomain = isl::union_set(Stmt.getDomain());
    isl::union_set NewStmtDomain = StmtDomain.intersect(uset2: Domain);

    // Nothing to do when the restriction does not remove any iterations.
    if (StmtDomain.is_subset(uset2: NewStmtDomain))
      continue;

    Changed = true;

    NewStmtDomain = NewStmtDomain.coalesce();

    if (NewStmtDomain.is_empty())
      Stmt.restrictDomain(NewDomain: isl::set::empty(space: Stmt.getDomainSpace()));
    else
      Stmt.restrictDomain(NewDomain: isl::set(NewStmtDomain));
  }
  return Changed;
}
2299
// Return the ScalarEvolution analysis used by this SCoP.
ScalarEvolution *Scop::getSE() const { return SE; }
2301
// Create a new block statement for @p BB covering @p Instructions and
// register it in StmtMap and InstStmtMap.
void Scop::addScopStmt(BasicBlock *BB, StringRef Name, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(BB && "Unexpected nullptr!");
  Stmts.emplace_back(args&: *this, args&: *BB, args&: Name, args&: SurroundingLoop, args&: Instructions);
  auto *Stmt = &Stmts.back();
  StmtMap[BB].push_back(x: Stmt);
  // Each instruction may belong to at most one statement.
  for (Instruction *Inst : Instructions) {
    assert(!InstStmtMap.count(Inst) &&
           "Unexpected statement corresponding to the instruction.");
    InstStmtMap[Inst] = Stmt;
  }
}
2314
// Create a new region statement for @p R covering @p Instructions. All
// blocks of the region are mapped to the statement; instructions of the
// entry block are registered via the instruction list, instructions of
// other blocks individually.
void Scop::addScopStmt(Region *R, StringRef Name, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(R && "Unexpected nullptr!");
  Stmts.emplace_back(args&: *this, args&: *R, args&: Name, args&: SurroundingLoop, args&: Instructions);
  auto *Stmt = &Stmts.back();

  for (Instruction *Inst : Instructions) {
    assert(!InstStmtMap.count(Inst) &&
           "Unexpected statement corresponding to the instruction.");
    InstStmtMap[Inst] = Stmt;
  }

  for (BasicBlock *BB : R->blocks()) {
    StmtMap[BB].push_back(x: Stmt);
    // Entry-block instructions are already covered by the instruction list.
    if (BB == R->getEntry())
      continue;
    for (Instruction &Inst : *BB) {
      assert(!InstStmtMap.count(&Inst) &&
             "Unexpected statement corresponding to the instruction.");
      InstStmtMap[&Inst] = Stmt;
    }
  }
}
2338
// Create a copy statement that transfers data according to @p SourceRel and
// @p TargetRel for every point of @p Domain. Both access relations must be
// defined on the complete statement domain.
ScopStmt *Scop::addScopStmt(isl::map SourceRel, isl::map TargetRel,
                            isl::set Domain) {
#ifndef NDEBUG
  isl::set SourceDomain = SourceRel.domain();
  isl::set TargetDomain = TargetRel.domain();
  assert(Domain.is_subset(TargetDomain) &&
         "Target access not defined for complete statement domain");
  assert(Domain.is_subset(SourceDomain) &&
         "Source access not defined for complete statement domain");
#endif
  Stmts.emplace_back(args&: *this, args&: SourceRel, args&: TargetRel, args&: Domain);
  CopyStmtsNum++;
  return &(Stmts.back());
}
2353
2354ArrayRef<ScopStmt *> Scop::getStmtListFor(BasicBlock *BB) const {
2355 auto StmtMapIt = StmtMap.find(Val: BB);
2356 if (StmtMapIt == StmtMap.end())
2357 return {};
2358 return StmtMapIt->second;
2359}
2360
// Return the statement that models the incoming value of PHI use @p U.
//
// If the incoming value is a non-synthesizable instruction defined in the
// incoming block itself, the statement containing that instruction is used;
// otherwise the last statement of the incoming block is taken.
ScopStmt *Scop::getIncomingStmtFor(const Use &U) const {
  auto *PHI = cast<PHINode>(Val: U.getUser());
  BasicBlock *IncomingBB = PHI->getIncomingBlock(U);

  // If the value is a non-synthesizable from the incoming block, use the
  // statement that contains it as user statement.
  if (auto *IncomingInst = dyn_cast<Instruction>(Val: U.get())) {
    if (IncomingInst->getParent() == IncomingBB) {
      if (ScopStmt *IncomingStmt = getStmtFor(Inst: IncomingInst))
        return IncomingStmt;
    }
  }

  // Otherwise, use the epilogue/last statement.
  return getLastStmtFor(BB: IncomingBB);
}
2377
2378ScopStmt *Scop::getLastStmtFor(BasicBlock *BB) const {
2379 ArrayRef<ScopStmt *> StmtList = getStmtListFor(BB);
2380 if (!StmtList.empty())
2381 return StmtList.back();
2382 return nullptr;
2383}
2384
// Return the statement list for a region-tree node, dispatching to the
// sub-region or basic-block overload depending on the node's kind.
ArrayRef<ScopStmt *> Scop::getStmtListFor(RegionNode *RN) const {
  if (RN->isSubRegion())
    return getStmtListFor(R: RN->getNodeAs<Region>());
  return getStmtListFor(BB: RN->getNodeAs<BasicBlock>());
}
2390
2391ArrayRef<ScopStmt *> Scop::getStmtListFor(Region *R) const {
2392 return getStmtListFor(BB: R->getEntry());
2393}
2394
2395int Scop::getRelativeLoopDepth(const Loop *L) const {
2396 if (!L || !R.contains(L))
2397 return -1;
2398 // outermostLoopInRegion always returns nullptr for top level regions
2399 if (R.isTopLevelRegion()) {
2400 // LoopInfo's depths start at 1, we start at 0
2401 return L->getLoopDepth() - 1;
2402 } else {
2403 Loop *OuterLoop = R.outermostLoopInRegion(L: const_cast<Loop *>(L));
2404 assert(OuterLoop);
2405 return L->getLoopDepth() - OuterLoop->getLoopDepth();
2406 }
2407}
2408
2409ScopArrayInfo *Scop::getArrayInfoByName(const std::string BaseName) {
2410 for (auto &SAI : arrays()) {
2411 if (SAI->getName() == BaseName)
2412 return SAI;
2413 }
2414 return nullptr;
2415}
2416
// Index @p Access for fast lookup: value reads are recorded per SAI in
// ValueUseAccs, PHI writes per SAI in PHIIncomingAccs. Must only be called
// after access relations (and thus the SAI) have been constructed.
void Scop::addAccessData(MemoryAccess *Access) {
  const ScopArrayInfo *SAI = Access->getOriginalScopArrayInfo();
  assert(SAI && "can only use after access relations have been constructed");

  if (Access->isOriginalValueKind() && Access->isRead())
    ValueUseAccs[SAI].push_back(Elt: Access);
  else if (Access->isOriginalAnyPHIKind() && Access->isWrite())
    PHIIncomingAccs[SAI].push_back(Elt: Access);
}
2426
// Remove @p Access from the lookup structures (ValueDefAccs, ValueUseAccs,
// PHIReadAccs, PHIIncomingAccs), depending on its original kind and
// direction.
void Scop::removeAccessData(MemoryAccess *Access) {
  if (Access->isOriginalValueKind() && Access->isWrite()) {
    ValueDefAccs.erase(Val: Access->getAccessValue());
  } else if (Access->isOriginalValueKind() && Access->isRead()) {
    auto &Uses = ValueUseAccs[Access->getScopArrayInfo()];
    llvm::erase(C&: Uses, V: Access);
  } else if (Access->isOriginalPHIKind() && Access->isRead()) {
    PHINode *PHI = cast<PHINode>(Val: Access->getAccessInstruction());
    PHIReadAccs.erase(Val: PHI);
  } else if (Access->isOriginalAnyPHIKind() && Access->isWrite()) {
    auto &Incomings = PHIIncomingAccs[Access->getScopArrayInfo()];
    llvm::erase(C&: Incomings, V: Access);
  }
}
2441
2442MemoryAccess *Scop::getValueDef(const ScopArrayInfo *SAI) const {
2443 assert(SAI->isValueKind());
2444
2445 Instruction *Val = dyn_cast<Instruction>(Val: SAI->getBasePtr());
2446 if (!Val)
2447 return nullptr;
2448
2449 return ValueDefAccs.lookup(Val);
2450}
2451
2452ArrayRef<MemoryAccess *> Scop::getValueUses(const ScopArrayInfo *SAI) const {
2453 assert(SAI->isValueKind());
2454 auto It = ValueUseAccs.find(Val: SAI);
2455 if (It == ValueUseAccs.end())
2456 return {};
2457 return It->second;
2458}
2459
2460MemoryAccess *Scop::getPHIRead(const ScopArrayInfo *SAI) const {
2461 assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2462
2463 if (SAI->isExitPHIKind())
2464 return nullptr;
2465
2466 PHINode *PHI = cast<PHINode>(Val: SAI->getBasePtr());
2467 return PHIReadAccs.lookup(Val: PHI);
2468}
2469
2470ArrayRef<MemoryAccess *> Scop::getPHIIncomings(const ScopArrayInfo *SAI) const {
2471 assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2472 auto It = PHIIncomingAccs.find(Val: SAI);
2473 if (It == PHIIncomingAccs.end())
2474 return {};
2475 return It->second;
2476}
2477
// Return true if the value computed by @p Inst is used outside of this
// SCoP. @p Inst itself must be defined inside the SCoP.
bool Scop::isEscaping(Instruction *Inst) {
  assert(contains(Inst) && "The concept of escaping makes only sense for "
                           "values defined inside the SCoP");

  for (Use &Use : Inst->uses()) {
    BasicBlock *UserBB = getUseBlock(U: Use);
    if (!contains(BB: UserBB))
      return true;

    // When the SCoP region exit needs to be simplified, PHIs in the region exit
    // move to a new basic block such that its incoming blocks are not in the
    // SCoP anymore.
    if (hasSingleExitEdge() && isa<PHINode>(Val: Use.getUser()) &&
        isExit(BB: cast<PHINode>(Val: Use.getUser())->getParent()))
      return true;
  }
  return false;
}
2496
// Increase the counter of taken aliasing assumptions by @p step.
void Scop::incrementNumberOfAliasingAssumptions(unsigned step) {
  AssumptionsAliasing += step;
}
2500
// Collect statistics about this SCoP: number of affine/boxed loops and, for
// every write access, counts of value writes, PHI writes and writes to a
// single element (singleton), each also counted separately when inside a
// loop. Only populated in builds with asserts or statistics enabled.
Scop::ScopStatistics Scop::getStatistics() const {
  ScopStatistics Result;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
  auto LoopStat = ScopDetection::countBeneficialLoops(R: &R, SE&: *SE, LI&: *getLI(), MinProfitableTrips: 0);

  int NumTotalLoops = LoopStat.NumLoops;
  Result.NumBoxedLoops = getBoxedLoops().size();
  Result.NumAffineLoops = NumTotalLoops - Result.NumBoxedLoops;

  for (const ScopStmt &Stmt : *this) {
    isl::set Domain = Stmt.getDomain().intersect_params(params: getContext());
    bool IsInLoop = Stmt.getNumIterators() >= 1;
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isWrite())
        continue;

      if (MA->isLatestValueKind()) {
        Result.NumValueWrites += 1;
        if (IsInLoop)
          Result.NumValueWritesInLoops += 1;
      }

      if (MA->isLatestAnyPHIKind()) {
        Result.NumPHIWrites += 1;
        if (IsInLoop)
          Result.NumPHIWritesInLoops += 1;
      }

      // A write is a singleton if its accessed range (over the executed
      // iterations) is a single element.
      isl::set AccSet =
          MA->getAccessRelation().intersect_domain(set: Domain).range();
      if (AccSet.is_singleton()) {
        Result.NumSingletonWrites += 1;
        if (IsInLoop)
          Result.NumSingletonWritesInLoops += 1;
      }
    }
  }
#endif
  return Result;
}
2541
2542raw_ostream &polly::operator<<(raw_ostream &OS, const Scop &scop) {
2543 scop.print(OS, PrintInstructions: PollyPrintInstructions);
2544 return OS;
2545}
2546
2547//===----------------------------------------------------------------------===//
// Declare the analyses required by ScopInfoRegionPass; the pass itself
// preserves all analyses.
void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}
2559
// Fold the loop and write statistics of a newly built SCoP into the global
// -stats counters.
void updateLoopCountStatistic(ScopDetection::LoopStats Stats,
                              Scop::ScopStatistics ScopStats) {
  assert(Stats.NumLoops == ScopStats.NumAffineLoops + ScopStats.NumBoxedLoops);

  NumScops++;
  NumLoopsInScop += Stats.NumLoops;
  MaxNumLoopsInScop =
      std::max(a: MaxNumLoopsInScop.getValue(), b: (uint64_t)Stats.NumLoops);

  // Bucket the SCoP by its maximal loop depth.
  if (Stats.MaxDepth == 0)
    NumScopsDepthZero++;
  else if (Stats.MaxDepth == 1)
    NumScopsDepthOne++;
  else if (Stats.MaxDepth == 2)
    NumScopsDepthTwo++;
  else if (Stats.MaxDepth == 3)
    NumScopsDepthThree++;
  else if (Stats.MaxDepth == 4)
    NumScopsDepthFour++;
  else if (Stats.MaxDepth == 5)
    NumScopsDepthFive++;
  else
    NumScopsDepthLarger++;

  NumAffineLoops += ScopStats.NumAffineLoops;
  NumBoxedLoops += ScopStats.NumBoxedLoops;

  NumValueWrites += ScopStats.NumValueWrites;
  NumValueWritesInLoops += ScopStats.NumValueWritesInLoops;
  NumPHIWrites += ScopStats.NumPHIWrites;
  NumPHIWritesInLoops += ScopStats.NumPHIWritesInLoops;
  NumSingletonWrites += ScopStats.NumSingletonWrites;
  NumSingletonWritesInLoops += ScopStats.NumSingletonWritesInLoops;
}
2594
2595bool ScopInfoRegionPass::runOnRegion(Region *R, RGPassManager &RGM) {
2596 auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2597
2598 if (!SD.isMaxRegionInScop(R: *R))
2599 return false;
2600
2601 Function *F = R->getEntry()->getParent();
2602 auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2603 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2604 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2605 auto const &DL = F->getParent()->getDataLayout();
2606 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2607 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F&: *F);
2608 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2609
2610 ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
2611 S = SB.getScop(); // take ownership of scop object
2612
2613#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2614 if (S) {
2615 ScopDetection::LoopStats Stats =
2616 ScopDetection::countBeneficialLoops(R: &S->getRegion(), SE, LI, MinProfitableTrips: 0);
2617 updateLoopCountStatistic(Stats, ScopStats: S->getStatistics());
2618 }
2619#endif
2620
2621 return false;
2622}
2623
2624void ScopInfoRegionPass::print(raw_ostream &OS, const Module *) const {
2625 if (S)
2626 S->print(OS, PrintInstructions: PollyPrintInstructions);
2627 else
2628 OS << "Invalid Scop!\n";
2629}
2630
// Unique pass identifier; the legacy pass manager keys passes on its address.
char ScopInfoRegionPass::ID = 0;
2632
2633Pass *polly::createScopInfoRegionPassPass() { return new ScopInfoRegionPass(); }
2634
// Register the pass and its analysis dependencies with the legacy pass
// manager under the command-line name "polly-scops".
INITIALIZE_PASS_BEGIN(ScopInfoRegionPass, "polly-scops",
                      "Polly - Create polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(ScopInfoRegionPass, "polly-scops",
                    "Polly - Create polyhedral description of Scops", false,
                    false)
2648
2649//===----------------------------------------------------------------------===//
2650
2651namespace {
2652
2653/// Print result from ScopInfoRegionPass.
2654class ScopInfoPrinterLegacyRegionPass final : public RegionPass {
2655public:
2656 static char ID;
2657
2658 ScopInfoPrinterLegacyRegionPass() : ScopInfoPrinterLegacyRegionPass(outs()) {}
2659
2660 explicit ScopInfoPrinterLegacyRegionPass(llvm::raw_ostream &OS)
2661 : RegionPass(ID), OS(OS) {}
2662
2663 bool runOnRegion(Region *R, RGPassManager &RGM) override {
2664 ScopInfoRegionPass &P = getAnalysis<ScopInfoRegionPass>();
2665
2666 OS << "Printing analysis '" << P.getPassName() << "' for region: '"
2667 << R->getNameStr() << "' in function '"
2668 << R->getEntry()->getParent()->getName() << "':\n";
2669 P.print(OS);
2670
2671 return false;
2672 }
2673
2674 void getAnalysisUsage(AnalysisUsage &AU) const override {
2675 RegionPass::getAnalysisUsage(AU);
2676 AU.addRequired<ScopInfoRegionPass>();
2677 AU.setPreservesAll();
2678 }
2679
2680private:
2681 llvm::raw_ostream &OS;
2682};
2683
2684char ScopInfoPrinterLegacyRegionPass::ID = 0;
2685} // namespace
2686
2687Pass *polly::createScopInfoPrinterLegacyRegionPass(raw_ostream &OS) {
2688 return new ScopInfoPrinterLegacyRegionPass(OS);
2689}
2690
// Register the printer pass with the legacy pass manager under the
// command-line name "polly-print-scops".
INITIALIZE_PASS_BEGIN(ScopInfoPrinterLegacyRegionPass, "polly-print-scops",
                      "Polly - Print polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
INITIALIZE_PASS_END(ScopInfoPrinterLegacyRegionPass, "polly-print-scops",
                    "Polly - Print polyhedral description of Scops", false,
                    false)
2698
2699//===----------------------------------------------------------------------===//
2700
/// Construct a ScopInfo and immediately build the Scops for all maximal
/// regions of the function the given analyses belong to (see recompute()).
ScopInfo::ScopInfo(const DataLayout &DL, ScopDetection &SD, ScalarEvolution &SE,
                   LoopInfo &LI, AliasAnalysis &AA, DominatorTree &DT,
                   AssumptionCache &AC, OptimizationRemarkEmitter &ORE)
    : DL(DL), SD(SD), SE(SE), LI(LI), AA(AA), DT(DT), AC(AC), ORE(ORE) {
  recompute();
}
2707
2708void ScopInfo::recompute() {
2709 RegionToScopMap.clear();
2710 /// Create polyhedral description of scops for all the valid regions of a
2711 /// function.
2712 for (auto &It : SD) {
2713 Region *R = const_cast<Region *>(It);
2714 if (!SD.isMaxRegionInScop(R: *R))
2715 continue;
2716
2717 ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
2718 std::unique_ptr<Scop> S = SB.getScop();
2719 if (!S)
2720 continue;
2721#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2722 ScopDetection::LoopStats Stats =
2723 ScopDetection::countBeneficialLoops(R: &S->getRegion(), SE, LI, MinProfitableTrips: 0);
2724 updateLoopCountStatistic(Stats, ScopStats: S->getStatistics());
2725#endif
2726 bool Inserted = RegionToScopMap.insert(KV: {R, std::move(S)}).second;
2727 assert(Inserted && "Building Scop for the same region twice!");
2728 (void)Inserted;
2729 }
2730}
2731
2732bool ScopInfo::invalidate(Function &F, const PreservedAnalyses &PA,
2733 FunctionAnalysisManager::Invalidator &Inv) {
2734 // Check whether the analysis, all analyses on functions have been preserved
2735 // or anything we're holding references to is being invalidated
2736 auto PAC = PA.getChecker<ScopInfoAnalysis>();
2737 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2738 Inv.invalidate<ScopAnalysis>(IR&: F, PA) ||
2739 Inv.invalidate<ScalarEvolutionAnalysis>(IR&: F, PA) ||
2740 Inv.invalidate<LoopAnalysis>(IR&: F, PA) ||
2741 Inv.invalidate<AAManager>(IR&: F, PA) ||
2742 Inv.invalidate<DominatorTreeAnalysis>(IR&: F, PA) ||
2743 Inv.invalidate<AssumptionAnalysis>(IR&: F, PA);
2744}
2745
// Unique key identifying ScopInfoAnalysis to the new pass manager.
AnalysisKey ScopInfoAnalysis::Key;
2747
2748ScopInfoAnalysis::Result ScopInfoAnalysis::run(Function &F,
2749 FunctionAnalysisManager &FAM) {
2750 auto &SD = FAM.getResult<ScopAnalysis>(IR&: F);
2751 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(IR&: F);
2752 auto &LI = FAM.getResult<LoopAnalysis>(IR&: F);
2753 auto &AA = FAM.getResult<AAManager>(IR&: F);
2754 auto &DT = FAM.getResult<DominatorTreeAnalysis>(IR&: F);
2755 auto &AC = FAM.getResult<AssumptionAnalysis>(IR&: F);
2756 auto &DL = F.getParent()->getDataLayout();
2757 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(IR&: F);
2758 return {DL, SD, SE, LI, AA, DT, AC, ORE};
2759}
2760
2761PreservedAnalyses ScopInfoPrinterPass::run(Function &F,
2762 FunctionAnalysisManager &FAM) {
2763 auto &SI = FAM.getResult<ScopInfoAnalysis>(IR&: F);
2764 // Since the legacy PM processes Scops in bottom up, we print them in reverse
2765 // order here to keep the output persistent
2766 for (auto &It : reverse(C&: SI)) {
2767 if (It.second)
2768 It.second->print(OS&: Stream, PrintInstructions: PollyPrintInstructions);
2769 else
2770 Stream << "Invalid Scop!\n";
2771 }
2772 return PreservedAnalyses::all();
2773}
2774
2775void ScopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2776 AU.addRequired<LoopInfoWrapperPass>();
2777 AU.addRequired<RegionInfoPass>();
2778 AU.addRequired<DominatorTreeWrapperPass>();
2779 AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
2780 AU.addRequiredTransitive<ScopDetectionWrapperPass>();
2781 AU.addRequired<AAResultsWrapperPass>();
2782 AU.addRequired<AssumptionCacheTracker>();
2783 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2784 AU.setPreservesAll();
2785}
2786
2787bool ScopInfoWrapperPass::runOnFunction(Function &F) {
2788 auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2789 auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2790 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2791 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2792 auto const &DL = F.getParent()->getDataLayout();
2793 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2794 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2795 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2796
2797 Result.reset(p: new ScopInfo{DL, SD, SE, LI, AA, DT, AC, ORE});
2798 return false;
2799}
2800
2801void ScopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
2802 for (auto &It : *Result) {
2803 if (It.second)
2804 It.second->print(OS, PrintInstructions: PollyPrintInstructions);
2805 else
2806 OS << "Invalid Scop!\n";
2807 }
2808}
2809
// Unique pass identifier; the legacy pass manager keys passes on its address.
char ScopInfoWrapperPass::ID = 0;
2811
2812Pass *polly::createScopInfoWrapperPassPass() {
2813 return new ScopInfoWrapperPass();
2814}
2815
// Register the pass and its analysis dependencies with the legacy pass
// manager under the command-line name "polly-function-scops".
INITIALIZE_PASS_BEGIN(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false)
2831
2832//===----------------------------------------------------------------------===//
2833
2834namespace {
2835/// Print result from ScopInfoWrapperPass.
2836class ScopInfoPrinterLegacyFunctionPass final : public FunctionPass {
2837public:
2838 static char ID;
2839
2840 ScopInfoPrinterLegacyFunctionPass()
2841 : ScopInfoPrinterLegacyFunctionPass(outs()) {}
2842 explicit ScopInfoPrinterLegacyFunctionPass(llvm::raw_ostream &OS)
2843 : FunctionPass(ID), OS(OS) {}
2844
2845 bool runOnFunction(Function &F) override {
2846 ScopInfoWrapperPass &P = getAnalysis<ScopInfoWrapperPass>();
2847
2848 OS << "Printing analysis '" << P.getPassName() << "' for function '"
2849 << F.getName() << "':\n";
2850 P.print(OS);
2851
2852 return false;
2853 }
2854
2855 void getAnalysisUsage(AnalysisUsage &AU) const override {
2856 FunctionPass::getAnalysisUsage(AU);
2857 AU.addRequired<ScopInfoWrapperPass>();
2858 AU.setPreservesAll();
2859 }
2860
2861private:
2862 llvm::raw_ostream &OS;
2863};
2864
2865char ScopInfoPrinterLegacyFunctionPass::ID = 0;
2866} // namespace
2867
2868Pass *polly::createScopInfoPrinterLegacyFunctionPass(raw_ostream &OS) {
2869 return new ScopInfoPrinterLegacyFunctionPass(OS);
2870}
2871
// Register the printer pass with the legacy pass manager under the
// command-line name "polly-print-function-scops".
INITIALIZE_PASS_BEGIN(
    ScopInfoPrinterLegacyFunctionPass, "polly-print-function-scops",
    "Polly - Print polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(ScopInfoWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoPrinterLegacyFunctionPass, "polly-print-function-scops",
    "Polly - Print polyhedral description of all Scops of a function", false,
    false)
2881

Provided by KDAB

Privacy Policy
Improve your Profiling and Debugging skills
Find out more

source code of polly/lib/Analysis/ScopInfo.cpp