1//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
11// basic-block-at-a-time approach. It should eventually be removed.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/MapVector.h"
19#include "llvm/ADT/PointerIntPair.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/Statistic.h"
24#include "llvm/Analysis/BlockFrequencyInfo.h"
25#include "llvm/Analysis/BranchProbabilityInfo.h"
26#include "llvm/Analysis/InstructionSimplify.h"
27#include "llvm/Analysis/LoopInfo.h"
28#include "llvm/Analysis/ProfileSummaryInfo.h"
29#include "llvm/Analysis/TargetLibraryInfo.h"
30#include "llvm/Analysis/TargetTransformInfo.h"
31#include "llvm/Analysis/ValueTracking.h"
32#include "llvm/Analysis/VectorUtils.h"
33#include "llvm/CodeGen/Analysis.h"
34#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
35#include "llvm/CodeGen/ISDOpcodes.h"
36#include "llvm/CodeGen/SelectionDAGNodes.h"
37#include "llvm/CodeGen/TargetLowering.h"
38#include "llvm/CodeGen/TargetPassConfig.h"
39#include "llvm/CodeGen/TargetSubtargetInfo.h"
40#include "llvm/CodeGen/ValueTypes.h"
41#include "llvm/Config/llvm-config.h"
42#include "llvm/IR/Argument.h"
43#include "llvm/IR/Attributes.h"
44#include "llvm/IR/BasicBlock.h"
45#include "llvm/IR/Constant.h"
46#include "llvm/IR/Constants.h"
47#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/DebugInfo.h"
49#include "llvm/IR/DerivedTypes.h"
50#include "llvm/IR/Dominators.h"
51#include "llvm/IR/Function.h"
52#include "llvm/IR/GetElementPtrTypeIterator.h"
53#include "llvm/IR/GlobalValue.h"
54#include "llvm/IR/GlobalVariable.h"
55#include "llvm/IR/IRBuilder.h"
56#include "llvm/IR/InlineAsm.h"
57#include "llvm/IR/InstrTypes.h"
58#include "llvm/IR/Instruction.h"
59#include "llvm/IR/Instructions.h"
60#include "llvm/IR/IntrinsicInst.h"
61#include "llvm/IR/Intrinsics.h"
62#include "llvm/IR/IntrinsicsAArch64.h"
63#include "llvm/IR/LLVMContext.h"
64#include "llvm/IR/MDBuilder.h"
65#include "llvm/IR/Module.h"
66#include "llvm/IR/Operator.h"
67#include "llvm/IR/PatternMatch.h"
68#include "llvm/IR/ProfDataUtils.h"
69#include "llvm/IR/Statepoint.h"
70#include "llvm/IR/Type.h"
71#include "llvm/IR/Use.h"
72#include "llvm/IR/User.h"
73#include "llvm/IR/Value.h"
74#include "llvm/IR/ValueHandle.h"
75#include "llvm/IR/ValueMap.h"
76#include "llvm/InitializePasses.h"
77#include "llvm/Pass.h"
78#include "llvm/Support/BlockFrequency.h"
79#include "llvm/Support/BranchProbability.h"
80#include "llvm/Support/Casting.h"
81#include "llvm/Support/CommandLine.h"
82#include "llvm/Support/Compiler.h"
83#include "llvm/Support/Debug.h"
84#include "llvm/Support/ErrorHandling.h"
85#include "llvm/Support/MachineValueType.h"
86#include "llvm/Support/MathExtras.h"
87#include "llvm/Support/raw_ostream.h"
88#include "llvm/Target/TargetMachine.h"
89#include "llvm/Target/TargetOptions.h"
90#include "llvm/Transforms/Utils/BasicBlockUtils.h"
91#include "llvm/Transforms/Utils/BypassSlowDivision.h"
92#include "llvm/Transforms/Utils/Local.h"
93#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
94#include "llvm/Transforms/Utils/SizeOpts.h"
95#include <algorithm>
96#include <cassert>
97#include <cstdint>
98#include <iterator>
99#include <limits>
100#include <memory>
101#include <utility>
102#include <vector>
103
104using namespace llvm;
105using namespace llvm::PatternMatch;
106
107#define DEBUG_TYPE "codegenprepare"
108
109STATISTIC(NumBlocksElim, "Number of blocks eliminated");
110STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
111STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
112STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
113 "sunken Cmps");
114STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
115 "of sunken Casts");
116STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
117 "computations were sunk");
118STATISTIC(NumMemoryInstsPhiCreated,
119 "Number of phis created when address "
120 "computations were sunk to memory instructions");
121STATISTIC(NumMemoryInstsSelectCreated,
122 "Number of select created when address "
123 "computations were sunk to memory instructions");
124STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
125STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
126STATISTIC(NumAndsAdded,
127 "Number of and mask instructions added to form ext loads");
128STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
129STATISTIC(NumRetsDup, "Number of return instructions duplicated");
130STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
131STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
132STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
133
134static cl::opt<bool> DisableBranchOpts(
135 "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
136 cl::desc("Disable branch optimizations in CodeGenPrepare"));
137
138static cl::opt<bool>
139 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
140 cl::desc("Disable GC optimizations in CodeGenPrepare"));
141
142static cl::opt<bool> DisableSelectToBranch(
143 "disable-cgp-select2branch", cl::Hidden, cl::init(false),
144 cl::desc("Disable select to branch conversion."));
145
146static cl::opt<bool> AddrSinkUsingGEPs(
147 "addr-sink-using-gep", cl::Hidden, cl::init(true),
148 cl::desc("Address sinking in CGP using GEPs."));
149
150static cl::opt<bool> EnableAndCmpSinking(
151 "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));
153
154static cl::opt<bool> DisableStoreExtract(
155 "disable-cgp-store-extract", cl::Hidden, cl::init(false),
156 cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
157
158static cl::opt<bool> StressStoreExtract(
159 "stress-cgp-store-extract", cl::Hidden, cl::init(false),
160 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
161
162static cl::opt<bool> DisableExtLdPromotion(
163 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
164 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
165 "CodeGenPrepare"));
166
167static cl::opt<bool> StressExtLdPromotion(
168 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
169 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
170 "optimization in CodeGenPrepare"));
171
172static cl::opt<bool> DisablePreheaderProtect(
173 "disable-preheader-prot", cl::Hidden, cl::init(false),
174 cl::desc("Disable protection against removing loop preheaders"));
175
176static cl::opt<bool> ProfileGuidedSectionPrefix(
177 "profile-guided-section-prefix", cl::Hidden, cl::init(true),
178 cl::desc("Use profile info to add section prefix for hot/cold functions"));
179
180static cl::opt<bool> ProfileUnknownInSpecialSection(
181 "profile-unknown-in-special-section", cl::Hidden,
    cl::desc("In profiling modes like SampleFDO, a function without a "
             "profile cannot be proven cold, because it may be a newly added "
             "function that has never been sampled. With this flag enabled, "
             "the compiler puts such profile-unknown functions into a special "
             "section, so the runtime system can choose to handle them "
             "differently from the .text section, for example to save RAM."));
189
190static cl::opt<bool> BBSectionsGuidedSectionPrefix(
191 "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
192 cl::desc("Use the basic-block-sections profile to determine the text "
193 "section prefix for hot functions. Functions with "
194 "basic-block-sections profile will be placed in `.text.hot` "
195 "regardless of their FDO profile info. Other functions won't be "
196 "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
197 "profiles."));
198
199static cl::opt<unsigned> FreqRatioToSkipMerge(
200 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
201 cl::desc("Skip merging empty blocks if (frequency of empty block) / "
202 "(frequency of destination block) is greater than this ratio"));
203
204static cl::opt<bool> ForceSplitStore(
205 "force-split-store", cl::Hidden, cl::init(false),
206 cl::desc("Force store splitting no matter what the target query says."));
207
208static cl::opt<bool>
209EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
210 cl::desc("Enable merging of redundant sexts when one is dominating"
211 " the other."), cl::init(true));
212
213static cl::opt<bool> DisableComplexAddrModes(
214 "disable-complex-addr-modes", cl::Hidden, cl::init(false),
215 cl::desc("Disables combining addressing modes with different parts "
216 "in optimizeMemoryInst."));
217
218static cl::opt<bool>
219AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
220 cl::desc("Allow creation of Phis in Address sinking."));
221
222static cl::opt<bool>
223AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
224 cl::desc("Allow creation of selects in Address sinking."));
225
226static cl::opt<bool> AddrSinkCombineBaseReg(
227 "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
228 cl::desc("Allow combining of BaseReg field in Address sinking."));
229
230static cl::opt<bool> AddrSinkCombineBaseGV(
231 "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
232 cl::desc("Allow combining of BaseGV field in Address sinking."));
233
234static cl::opt<bool> AddrSinkCombineBaseOffs(
235 "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
236 cl::desc("Allow combining of BaseOffs field in Address sinking."));
237
238static cl::opt<bool> AddrSinkCombineScaledReg(
239 "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
240 cl::desc("Allow combining of ScaledReg field in Address sinking."));
241
242static cl::opt<bool>
243 EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
244 cl::init(true),
245 cl::desc("Enable splitting large offset of GEP."));
246
247static cl::opt<bool> EnableICMP_EQToICMP_ST(
248 "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
249 cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
250
251static cl::opt<bool>
252 VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
253 cl::desc("Enable BFI update verification for "
254 "CodeGenPrepare."));
255
256static cl::opt<bool> OptimizePhiTypes(
257 "cgp-optimize-phi-types", cl::Hidden, cl::init(false),
258 cl::desc("Enable converting phi types in CodeGenPrepare"));
259
260namespace {
261
262enum ExtType {
263 ZeroExtension, // Zero extension has been seen.
264 SignExtension, // Sign extension has been seen.
265 BothExtension // This extension type is used if we saw sext after
266 // ZeroExtension had been set, or if we saw zext after
267 // SignExtension had been set. It makes the type
268 // information of a promoted instruction invalid.
269};
270
271using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
272using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
273using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
274using SExts = SmallVector<Instruction *, 16>;
275using ValueToSExts = DenseMap<Value *, SExts>;
276
277class TypePromotionTransaction;
278
279 class CodeGenPrepare : public FunctionPass {
280 const TargetMachine *TM = nullptr;
281 const TargetSubtargetInfo *SubtargetInfo;
282 const TargetLowering *TLI = nullptr;
283 const TargetRegisterInfo *TRI;
284 const TargetTransformInfo *TTI = nullptr;
285 const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
286 const TargetLibraryInfo *TLInfo;
287 const LoopInfo *LI;
288 std::unique_ptr<BlockFrequencyInfo> BFI;
289 std::unique_ptr<BranchProbabilityInfo> BPI;
290 ProfileSummaryInfo *PSI;
291
292 /// As we scan instructions optimizing them, this is the next instruction
293 /// to optimize. Transforms that can invalidate this should update it.
294 BasicBlock::iterator CurInstIterator;
295
296 /// Keeps track of non-local addresses that have been sunk into a block.
297 /// This allows us to avoid inserting duplicate code for blocks with
298 /// multiple load/stores of the same address. The usage of WeakTrackingVH
299 /// enables SunkAddrs to be treated as a cache whose entries can be
300 /// invalidated if a sunken address computation has been erased.
301 ValueMap<Value*, WeakTrackingVH> SunkAddrs;
302
303 /// Keeps track of all instructions inserted for the current function.
304 SetOfInstrs InsertedInsts;
305
    /// Keeps track of the original type of each instruction before its
    /// promotion, for the current function.
308 InstrToOrigTy PromotedInsts;
309
310 /// Keep track of instructions removed during promotion.
311 SetOfInstrs RemovedInsts;
312
313 /// Keep track of sext chains based on their initial value.
314 DenseMap<Value *, Instruction *> SeenChainsForSExt;
315
316 /// Keep track of GEPs accessing the same data structures such as structs or
317 /// arrays that are candidates to be split later because of their large
318 /// size.
319 MapVector<
320 AssertingVH<Value>,
321 SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
322 LargeOffsetGEPMap;
323
324 /// Keep track of new GEP base after splitting the GEPs having large offset.
325 SmallSet<AssertingVH<Value>, 2> NewGEPBases;
326
327 /// Map serial numbers to Large offset GEPs.
328 DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
329
330 /// Keep track of SExt promoted.
331 ValueToSExts ValToSExtendedUses;
332
333 /// True if the function has the OptSize attribute.
334 bool OptSize;
335
336 /// DataLayout for the Function being processed.
337 const DataLayout *DL = nullptr;
338
339 /// Building the dominator tree can be expensive, so we only build it
340 /// lazily and update it when required.
341 std::unique_ptr<DominatorTree> DT;
342
343 public:
344 static char ID; // Pass identification, replacement for typeid
345
346 CodeGenPrepare() : FunctionPass(ID) {
347 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
348 }
349
350 bool runOnFunction(Function &F) override;
351
352 StringRef getPassName() const override { return "CodeGen Prepare"; }
353
354 void getAnalysisUsage(AnalysisUsage &AU) const override {
355 // FIXME: When we can selectively preserve passes, preserve the domtree.
356 AU.addRequired<ProfileSummaryInfoWrapperPass>();
357 AU.addRequired<TargetLibraryInfoWrapperPass>();
358 AU.addRequired<TargetPassConfig>();
359 AU.addRequired<TargetTransformInfoWrapperPass>();
360 AU.addRequired<LoopInfoWrapperPass>();
361 AU.addUsedIfAvailable<BasicBlockSectionsProfileReader>();
362 }
363
364 private:
365 template <typename F>
366 void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
367 // Substituting can cause recursive simplifications, which can invalidate
368 // our iterator. Use a WeakTrackingVH to hold onto it in case this
369 // happens.
370 Value *CurValue = &*CurInstIterator;
371 WeakTrackingVH IterHandle(CurValue);
372
373 f();
374
375 // If the iterator instruction was recursively deleted, start over at the
376 // start of the block.
377 if (IterHandle != CurValue) {
378 CurInstIterator = BB->begin();
379 SunkAddrs.clear();
380 }
381 }
382
383 // Get the DominatorTree, building if necessary.
384 DominatorTree &getDT(Function &F) {
385 if (!DT)
386 DT = std::make_unique<DominatorTree>(F);
387 return *DT;
388 }
389
390 void removeAllAssertingVHReferences(Value *V);
391 bool eliminateAssumptions(Function &F);
392 bool eliminateFallThrough(Function &F);
393 bool eliminateMostlyEmptyBlocks(Function &F);
394 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
395 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
396 void eliminateMostlyEmptyBlock(BasicBlock *BB);
397 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
398 bool isPreheader);
399 bool makeBitReverse(Instruction &I);
400 bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
401 bool optimizeInst(Instruction *I, bool &ModifiedDT);
402 bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
403 Type *AccessTy, unsigned AddrSpace);
404 bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
405 bool optimizeInlineAsmInst(CallInst *CS);
406 bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
407 bool optimizeExt(Instruction *&I);
408 bool optimizeExtUses(Instruction *I);
409 bool optimizeLoadExt(LoadInst *Load);
410 bool optimizeShiftInst(BinaryOperator *BO);
411 bool optimizeFunnelShift(IntrinsicInst *Fsh);
412 bool optimizeSelectInst(SelectInst *SI);
413 bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
414 bool optimizeSwitchType(SwitchInst *SI);
415 bool optimizeSwitchPhiConstants(SwitchInst *SI);
416 bool optimizeSwitchInst(SwitchInst *SI);
417 bool optimizeExtractElementInst(Instruction *Inst);
418 bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
419 bool fixupDbgValue(Instruction *I);
420 bool placeDbgValues(Function &F);
421 bool placePseudoProbes(Function &F);
422 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
423 LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
424 bool tryToPromoteExts(TypePromotionTransaction &TPT,
425 const SmallVectorImpl<Instruction *> &Exts,
426 SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
427 unsigned CreatedInstsCost = 0);
428 bool mergeSExts(Function &F);
429 bool splitLargeGEPOffsets();
430 bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
431 SmallPtrSetImpl<Instruction *> &DeletedInstrs);
432 bool optimizePhiTypes(Function &F);
433 bool performAddressTypePromotion(
434 Instruction *&Inst,
435 bool AllowPromotionWithoutCommonHeader,
436 bool HasPromoted, TypePromotionTransaction &TPT,
437 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
438 bool splitBranchCondition(Function &F, bool &ModifiedDT);
439 bool simplifyOffsetableRelocate(GCStatepointInst &I);
440
441 bool tryToSinkFreeOperands(Instruction *I);
442 bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0,
443 Value *Arg1, CmpInst *Cmp,
444 Intrinsic::ID IID);
445 bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
446 bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
447 bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
448 void verifyBFIUpdates(Function &F);
449 };
450
451} // end anonymous namespace
452
453char CodeGenPrepare::ID = 0;
454
455INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
456 "Optimize for code generation", false, false)
457INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReader)
458INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
459INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
460INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
461INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
462INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
463INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
464 "Optimize for code generation", false, false)
465
466FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
467
468bool CodeGenPrepare::runOnFunction(Function &F) {
469 if (skipFunction(F))
470 return false;
471
472 DL = &F.getParent()->getDataLayout();
473
474 bool EverMadeChange = false;
475 // Clear per function information.
476 InsertedInsts.clear();
477 PromotedInsts.clear();
478
479 TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
480 SubtargetInfo = TM->getSubtargetImpl(F);
481 TLI = SubtargetInfo->getTargetLowering();
482 TRI = SubtargetInfo->getRegisterInfo();
483 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
484 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
485 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
486 BPI.reset(new BranchProbabilityInfo(F, *LI));
487 BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
488 PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
489 BBSectionsProfileReader =
490 getAnalysisIfAvailable<BasicBlockSectionsProfileReader>();
491 OptSize = F.hasOptSize();
  // Use the basic-block-sections profile to promote hot functions to .text.hot
  // if requested.
493 if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
494 BBSectionsProfileReader->isFunctionHot(F.getName())) {
495 F.setSectionPrefix("hot");
496 } else if (ProfileGuidedSectionPrefix) {
    // The hot attribute overrides profile-count-based hotness, while
    // profile-count-based hotness overrides the cold attribute. This is
    // conservative behavior.
500 if (F.hasFnAttribute(Attribute::Hot) ||
501 PSI->isFunctionHotInCallGraph(&F, *BFI))
502 F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we place the function into the
    // unlikely section if (1) PSI shows this is a cold function, or
    // (2) the function has the cold attribute.
506 else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
507 F.hasFnAttribute(Attribute::Cold))
508 F.setSectionPrefix("unlikely");
509 else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
510 PSI->isFunctionHotnessUnknown(F))
511 F.setSectionPrefix("unknown");
512 }
513
514 /// This optimization identifies DIV instructions that can be
515 /// profitably bypassed and carried out with a shorter, faster divide.
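  // For illustration (hypothetical, target dependent): a 64-bit udiv whose
  // operands happen to fit in 32 bits at run time can be guarded by a range
  // check and dispatched to a faster 32-bit divide; bypassSlowDivision
  // materializes that check-and-dispatch pattern below.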
516 if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
517 const DenseMap<unsigned int, unsigned int> &BypassWidths =
518 TLI->getBypassSlowDivWidths();
519 BasicBlock* BB = &*F.begin();
520 while (BB != nullptr) {
521 // bypassSlowDivision may create new BBs, but we don't want to reapply the
522 // optimization to those blocks.
523 BasicBlock* Next = BB->getNextNode();
524 // F.hasOptSize is already checked in the outer if statement.
525 if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
526 EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
527 BB = Next;
528 }
529 }
530
531 // Get rid of @llvm.assume builtins before attempting to eliminate empty
532 // blocks, since there might be blocks that only contain @llvm.assume calls
533 // (plus arguments that we can get rid of).
534 EverMadeChange |= eliminateAssumptions(F);
535
536 // Eliminate blocks that contain only PHI nodes and an
537 // unconditional branch.
538 EverMadeChange |= eliminateMostlyEmptyBlocks(F);
539
540 bool ModifiedDT = false;
541 if (!DisableBranchOpts)
542 EverMadeChange |= splitBranchCondition(F, ModifiedDT);
543
544 // Split some critical edges where one of the sources is an indirect branch,
545 // to help generate sane code for PHIs involving such edges.
546 EverMadeChange |=
547 SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);
548
549 bool MadeChange = true;
550 while (MadeChange) {
551 MadeChange = false;
552 DT.reset();
553 for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
554 bool ModifiedDTOnIteration = false;
555 MadeChange |= optimizeBlock(BB, ModifiedDTOnIteration);
556
557 // Restart BB iteration if the dominator tree of the Function was changed
558 if (ModifiedDTOnIteration)
559 break;
560 }
561 if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
562 MadeChange |= mergeSExts(F);
563 if (!LargeOffsetGEPMap.empty())
564 MadeChange |= splitLargeGEPOffsets();
565 MadeChange |= optimizePhiTypes(F);
566
567 if (MadeChange)
568 eliminateFallThrough(F);
569
570 // Really free removed instructions during promotion.
571 for (Instruction *I : RemovedInsts)
572 I->deleteValue();
573
574 EverMadeChange |= MadeChange;
575 SeenChainsForSExt.clear();
576 ValToSExtendedUses.clear();
577 RemovedInsts.clear();
578 LargeOffsetGEPMap.clear();
579 LargeOffsetGEPID.clear();
580 }
581
582 NewGEPBases.clear();
583 SunkAddrs.clear();
584
585 if (!DisableBranchOpts) {
586 MadeChange = false;
587 // Use a set vector to get deterministic iteration order. The order the
588 // blocks are removed may affect whether or not PHI nodes in successors
589 // are removed.
590 SmallSetVector<BasicBlock*, 8> WorkList;
591 for (BasicBlock &BB : F) {
592 SmallVector<BasicBlock *, 2> Successors(successors(&BB));
593 MadeChange |= ConstantFoldTerminator(&BB, true);
594 if (!MadeChange) continue;
595
596 for (BasicBlock *Succ : Successors)
597 if (pred_empty(Succ))
598 WorkList.insert(Succ);
599 }
600
601 // Delete the dead blocks and any of their dead successors.
602 MadeChange |= !WorkList.empty();
603 while (!WorkList.empty()) {
604 BasicBlock *BB = WorkList.pop_back_val();
605 SmallVector<BasicBlock*, 2> Successors(successors(BB));
606
607 DeleteDeadBlock(BB);
608
609 for (BasicBlock *Succ : Successors)
610 if (pred_empty(Succ))
611 WorkList.insert(Succ);
612 }
613
614 // Merge pairs of basic blocks with unconditional branches, connected by
615 // a single edge.
616 if (EverMadeChange || MadeChange)
617 MadeChange |= eliminateFallThrough(F);
618
619 EverMadeChange |= MadeChange;
620 }
621
622 if (!DisableGCOpts) {
623 SmallVector<GCStatepointInst *, 2> Statepoints;
624 for (BasicBlock &BB : F)
625 for (Instruction &I : BB)
626 if (auto *SP = dyn_cast<GCStatepointInst>(&I))
627 Statepoints.push_back(SP);
628 for (auto &I : Statepoints)
629 EverMadeChange |= simplifyOffsetableRelocate(*I);
630 }
631
632 // Do this last to clean up use-before-def scenarios introduced by other
633 // preparatory transforms.
634 EverMadeChange |= placeDbgValues(F);
635 EverMadeChange |= placePseudoProbes(F);
636
637#ifndef NDEBUG
638 if (VerifyBFIUpdates)
639 verifyBFIUpdates(F);
640#endif
641
642 return EverMadeChange;
643}
644
645bool CodeGenPrepare::eliminateAssumptions(Function &F) {
646 bool MadeChange = false;
647 for (BasicBlock &BB : F) {
648 CurInstIterator = BB.begin();
649 while (CurInstIterator != BB.end()) {
650 Instruction *I = &*(CurInstIterator++);
651 if (auto *Assume = dyn_cast<AssumeInst>(I)) {
652 MadeChange = true;
653 Value *Operand = Assume->getOperand(0);
654 Assume->eraseFromParent();
655
656 resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
657 RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
658 });
659 }
660 }
661 }
662 return MadeChange;
663}
664
/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
667void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
668 LargeOffsetGEPMap.erase(V);
669 NewGEPBases.erase(V);
670
671 auto GEP = dyn_cast<GetElementPtrInst>(V);
672 if (!GEP)
673 return;
674
675 LargeOffsetGEPID.erase(GEP);
676
677 auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
678 if (VecI == LargeOffsetGEPMap.end())
679 return;
680
681 auto &GEPVector = VecI->second;
682 llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
683
684 if (GEPVector.empty())
685 LargeOffsetGEPMap.erase(VecI);
686}
687
688// Verify BFI has been updated correctly by recomputing BFI and comparing them.
689void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
690 DominatorTree NewDT(F);
691 LoopInfo NewLI(NewDT);
692 BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
693 BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
694 NewBFI.verifyMatch(*BFI);
695}
696
697/// Merge basic blocks which are connected by a single edge, where one of the
698/// basic blocks has a single successor pointing to the other basic block,
699/// which has a single predecessor.
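/// For illustration, a minimal sketch of the CFG shape this handles
/// (hypothetical IR, names made up):
///
///   bb1:
///     br label %bb2
///   bb2:                                ; preds = %bb1 (single predecessor)
///     %v = add i32 %x, 1
///     ret i32 %v
///
/// bb2 is merged into bb1, leaving a single block with the add and the ret.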
700bool CodeGenPrepare::eliminateFallThrough(Function &F) {
701 bool Changed = false;
702 // Scan all of the blocks in the function, except for the entry block.
703 // Use a temporary array to avoid iterator being invalidated when
704 // deleting blocks.
705 SmallVector<WeakTrackingVH, 16> Blocks;
706 for (auto &Block : llvm::drop_begin(F))
707 Blocks.push_back(&Block);
708
709 SmallSet<WeakTrackingVH, 16> Preds;
710 for (auto &Block : Blocks) {
711 auto *BB = cast_or_null<BasicBlock>(Block);
712 if (!BB)
713 continue;
714 // If the destination block has a single pred, then this is a trivial
715 // edge, just collapse it.
716 BasicBlock *SinglePred = BB->getSinglePredecessor();
717
718 // Don't merge if BB's address is taken.
719 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
720
721 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
722 if (Term && !Term->isConditional()) {
723 Changed = true;
724 LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
725
726 // Merge BB into SinglePred and delete it.
727 MergeBlockIntoPredecessor(BB);
728 Preds.insert(SinglePred);
729 }
730 }
731
732 // (Repeatedly) merging blocks into their predecessors can create redundant
733 // debug intrinsics.
734 for (const auto &Pred : Preds)
735 if (auto *BB = cast_or_null<BasicBlock>(Pred))
736 RemoveRedundantDbgInstrs(BB);
737
738 return Changed;
739}
740
/// Find a destination block from BB if BB is a mergeable empty block.
742BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
743 // If this block doesn't end with an uncond branch, ignore it.
744 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
745 if (!BI || !BI->isUnconditional())
746 return nullptr;
747
748 // If the instruction before the branch (skipping debug info) isn't a phi
749 // node, then other stuff is happening here.
750 BasicBlock::iterator BBI = BI->getIterator();
751 if (BBI != BB->begin()) {
752 --BBI;
753 while (isa<DbgInfoIntrinsic>(BBI)) {
754 if (BBI == BB->begin())
755 break;
756 --BBI;
757 }
758 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
759 return nullptr;
760 }
761
762 // Do not break infinite loops.
763 BasicBlock *DestBB = BI->getSuccessor(0);
764 if (DestBB == BB)
765 return nullptr;
766
767 if (!canMergeBlocks(BB, DestBB))
768 DestBB = nullptr;
769
770 return DestBB;
771}
772
773/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
774/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
775/// edges in ways that are non-optimal for isel. Start by eliminating these
776/// blocks so we can split them the way we want them.
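/// A sketch of the kind of block this targets (hypothetical IR):
///
///   bb:                                  ; preds = %a, %b
///     %p = phi i32 [ 0, %a ], [ 1, %b ]
///     br label %dest
///
/// The block contains only PHIs (plus optional debug intrinsics) and an
/// unconditional branch, so it can be folded into %dest when profitable.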
777bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
778 SmallPtrSet<BasicBlock *, 16> Preheaders;
779 SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
780 while (!LoopList.empty()) {
781 Loop *L = LoopList.pop_back_val();
782 llvm::append_range(LoopList, *L);
783 if (BasicBlock *Preheader = L->getLoopPreheader())
784 Preheaders.insert(Preheader);
785 }
786
787 bool MadeChange = false;
788 // Copy blocks into a temporary array to avoid iterator invalidation issues
789 // as we remove them.
790 // Note that this intentionally skips the entry block.
791 SmallVector<WeakTrackingVH, 16> Blocks;
792 for (auto &Block : llvm::drop_begin(F))
793 Blocks.push_back(&Block);
794
795 for (auto &Block : Blocks) {
796 BasicBlock *BB = cast_or_null<BasicBlock>(Block);
797 if (!BB)
798 continue;
799 BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
800 if (!DestBB ||
801 !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
802 continue;
803
804 eliminateMostlyEmptyBlock(BB);
805 MadeChange = true;
806 }
807 return MadeChange;
808}
809
810bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
811 BasicBlock *DestBB,
812 bool isPreheader) {
813 // Do not delete loop preheaders if doing so would create a critical edge.
814 // Loop preheaders can be good locations to spill registers. If the
815 // preheader is deleted and we create a critical edge, registers may be
816 // spilled in the loop body instead.
817 if (!DisablePreheaderProtect && isPreheader &&
818 !(BB->getSinglePredecessor() &&
819 BB->getSinglePredecessor()->getSingleSuccessor()))
820 return false;
821
822 // Skip merging if the block's successor is also a successor to any callbr
823 // that leads to this block.
824 // FIXME: Is this really needed? Is this a correctness issue?
825 for (BasicBlock *Pred : predecessors(BB)) {
826 if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
827 for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
828 if (DestBB == CBI->getSuccessor(i))
829 return false;
830 }
831
832 // Try to skip merging if the unique predecessor of BB is terminated by a
833 // switch or indirect branch instruction, and BB is used as an incoming block
834 // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
835 // add COPY instructions in the predecessor of BB instead of BB (if it is not
  // merged). Note that the critical edge created by merging such blocks won't
  // be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
840 BasicBlock *Pred = BB->getUniquePredecessor();
841 if (!Pred ||
842 !(isa<SwitchInst>(Pred->getTerminator()) ||
843 isa<IndirectBrInst>(Pred->getTerminator())))
844 return true;
845
846 if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
847 return true;
848
  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping merging is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we can simplify this to:
  //   Freq(Pred) / Freq(BB) > 2.
856 // Note that if there are multiple empty blocks sharing the same incoming
857 // value for the PHIs in the DestBB, we consider them together. In such
858 // case, Cost(merging BB) will be the sum of their frequencies.
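  //
  // A worked example with made-up frequencies: if Freq(Pred) = 1000 and
  // Freq(BB) = 400, the ratio is 2.5 > 2, so we keep BB (skip merging); if
  // Freq(Pred) = 600, the ratio is 1.5 and merging is considered profitable.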
859
860 if (!isa<PHINode>(DestBB->begin()))
861 return true;
862
863 SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
864
865 // Find all other incoming blocks from which incoming values of all PHIs in
866 // DestBB are the same as the ones from BB.
867 for (BasicBlock *DestBBPred : predecessors(DestBB)) {
868 if (DestBBPred == BB)
869 continue;
870
871 if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
872 return DestPN.getIncomingValueForBlock(BB) ==
873 DestPN.getIncomingValueForBlock(DestBBPred);
874 }))
875 SameIncomingValueBBs.insert(DestBBPred);
876 }
877
  // See if all of BB's incoming values are the same as the value from Pred.
  // In this case, there is no reason to skip merging because COPYs are
  // expected to be placed in Pred already.
881 if (SameIncomingValueBBs.count(Pred))
882 return true;
883
884 BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
885 BlockFrequency BBFreq = BFI->getBlockFreq(BB);
886
887 for (auto *SameValueBB : SameIncomingValueBBs)
888 if (SameValueBB->getUniquePredecessor() == Pred &&
889 DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
890 BBFreq += BFI->getBlockFreq(SameValueBB);
891
892 return PredFreq.getFrequency() <=
893 BBFreq.getFrequency() * FreqRatioToSkipMerge;
894}
895
896/// Return true if we can merge BB into DestBB if there is a single
897/// unconditional branch between them, and BB contains no other non-phi
898/// instructions.
899bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
900 const BasicBlock *DestBB) const {
901 // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
903 // don't mess around with them.
904 for (const PHINode &PN : BB->phis()) {
905 for (const User *U : PN.users()) {
906 const Instruction *UI = cast<Instruction>(U);
907 if (UI->getParent() != DestBB || !isa<PHINode>(UI))
908 return false;
909 // If User is inside DestBB block and it is a PHINode then check
910 // incoming value. If incoming value is not from BB then this is
911 // a complex condition (e.g. preheaders) we want to avoid here.
912 if (UI->getParent() == DestBB) {
913 if (const PHINode *UPN = dyn_cast<PHINode>(UI))
914 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
915 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
916 if (Insn && Insn->getParent() == BB &&
917 Insn->getParent() != UPN->getIncomingBlock(I))
918 return false;
919 }
920 }
921 }
922 }
923
924 // If BB and DestBB contain any common predecessors, then the phi nodes in BB
925 // and DestBB may have conflicting incoming values for the block. If so, we
926 // can't merge the block.
927 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
928 if (!DestBBPN) return true; // no conflict.
929
930 // Collect the preds of BB.
931 SmallPtrSet<const BasicBlock*, 16> BBPreds;
932 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
933 // It is faster to get preds from a PHI than with pred_iterator.
934 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
935 BBPreds.insert(BBPN->getIncomingBlock(i));
936 } else {
937 BBPreds.insert(pred_begin(BB), pred_end(BB));
938 }
939
940 // Walk the preds of DestBB.
941 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
942 BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
943 if (BBPreds.count(Pred)) { // Common predecessor?
944 for (const PHINode &PN : DestBB->phis()) {
945 const Value *V1 = PN.getIncomingValueForBlock(Pred);
946 const Value *V2 = PN.getIncomingValueForBlock(BB);
947
948 // If V2 is a phi node in BB, look up what the mapped value will be.
949 if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
950 if (V2PN->getParent() == BB)
951 V2 = V2PN->getIncomingValueForBlock(Pred);
952
953 // If there is a conflict, bail out.
954 if (V1 != V2) return false;
955 }
956 }
957 }
958
959 return true;
960}
961
962/// Eliminate a basic block that has only phi's and an unconditional branch in
963/// it.
964void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
965 BranchInst *BI = cast<BranchInst>(BB->getTerminator());
966 BasicBlock *DestBB = BI->getSuccessor(0);
967
968 LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
969 << *BB << *DestBB);
970
971 // If the destination block has a single pred, then this is a trivial edge,
972 // just collapse it.
973 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
974 if (SinglePred != DestBB) {
975 assert(SinglePred == BB &&
976 "Single predecessor not the same as predecessor");
977 // Merge DestBB into SinglePred/BB and delete it.
978 MergeBlockIntoPredecessor(DestBB);
979 // Note: BB(=SinglePred) will not be deleted on this path.
980 // DestBB(=its single successor) is the one that was deleted.
981 LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
982 return;
983 }
984 }
985
986 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
987 // to handle the new incoming edges it is about to have.
988 for (PHINode &PN : DestBB->phis()) {
989 // Remove the incoming value for BB, and remember it.
990 Value *InVal = PN.removeIncomingValue(BB, false);
991
992 // Two options: either the InVal is a phi node defined in BB or it is some
993 // value that dominates BB.
994 PHINode *InValPhi = dyn_cast<PHINode>(InVal);
995 if (InValPhi && InValPhi->getParent() == BB) {
996 // Add all of the input values of the input PHI as inputs of this phi.
997 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
998 PN.addIncoming(InValPhi->getIncomingValue(i),
999 InValPhi->getIncomingBlock(i));
1000 } else {
1001 // Otherwise, add one instance of the dominating value for each edge that
1002 // we will be adding.
1003 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1004 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1005 PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
1006 } else {
1007 for (BasicBlock *Pred : predecessors(BB))
1008 PN.addIncoming(InVal, Pred);
1009 }
1010 }
1011 }
1012
1013 // The PHIs are now updated, change everything that refers to BB to use
1014 // DestBB and remove BB.
1015 BB->replaceAllUsesWith(DestBB);
1016 BB->eraseFromParent();
1017 ++NumBlocksElim;
1018
1019 LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1020}
1021
1022// Computes a map of base pointer relocation instructions to corresponding
1023// derived pointer relocation instructions given a vector of all relocate calls
1024static void computeBaseDerivedRelocateMap(
1025 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1026 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
1027 &RelocateInstMap) {
1028 // Collect information in two maps: one primarily for locating the base object
1029 // while filling the second map; the second map is the final structure holding
1030 // a mapping between Base and corresponding Derived relocate calls
1031 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1032 for (auto *ThisRelocate : AllRelocateCalls) {
1033 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1034 ThisRelocate->getDerivedPtrIndex());
1035 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1036 }
1037 for (auto &Item : RelocateIdxMap) {
1038 std::pair<unsigned, unsigned> Key = Item.first;
1039 if (Key.first == Key.second)
1040 // Base relocation: nothing to insert
1041 continue;
1042
1043 GCRelocateInst *I = Item.second;
1044 auto BaseKey = std::make_pair(Key.first, Key.first);
1045
1046 // We're iterating over RelocateIdxMap so we cannot modify it.
1047 auto MaybeBase = RelocateIdxMap.find(BaseKey);
1048 if (MaybeBase == RelocateIdxMap.end())
1049 // TODO: We might want to insert a new base object relocate and gep off
1050 // that, if there are enough derived object relocates.
1051 continue;
1052
1053 RelocateInstMap[MaybeBase->second].push_back(I);
1054 }
1055}
1056
1057// Accepts a GEP and extracts the operands into a vector provided they're all
1058// small integer constants
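// For example (hypothetical GEP): for "gep %base, i32 0, i32 4" this collects
// {0, 4}; a non-constant index or a constant larger than 20 makes it bail.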
1059static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1060 SmallVectorImpl<Value *> &OffsetV) {
1061 for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1062 // Only accept small constant integer operands
1063 auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1064 if (!Op || Op->getZExtValue() > 20)
1065 return false;
1066 }
1067
1068 for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1069 OffsetV.push_back(GEP->getOperand(i));
1070 return true;
1071}
1072
1073// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
1075static bool
1076simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1077 const SmallVectorImpl<GCRelocateInst *> &Targets) {
1078 bool MadeChange = false;
  // We must ensure the relocation of the derived pointer is defined after the
  // relocation of the base pointer. If we find a relocation corresponding to
  // the base that is defined earlier than the relocation of the base, we move
  // the relocation of the base right before the found relocation. We consider
  // only relocations in the same basic block as the relocation of the base;
  // relocations from other basic blocks are skipped by this optimization and
  // we do not care about them.
1085 for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1086 &*R != RelocatedBase; ++R)
1087 if (auto *RI = dyn_cast<GCRelocateInst>(R))
1088 if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1089 if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1090 RelocatedBase->moveBefore(RI);
1091 break;
1092 }
1093
1094 for (GCRelocateInst *ToReplace : Targets) {
1095 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1096 "Not relocating a derived object of the original base object");
1097 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1098 // A duplicate relocate call. TODO: coalesce duplicates.
1099 continue;
1100 }
1101
1102 if (RelocatedBase->getParent() != ToReplace->getParent()) {
1103 // Base and derived relocates are in different basic blocks.
1104 // In this case transform is only valid when base dominates derived
1105 // relocate. However it would be too expensive to check dominance
1106 // for each such relocate, so we skip the whole transformation.
1107 continue;
1108 }
1109
1110 Value *Base = ToReplace->getBasePtr();
1111 auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1112 if (!Derived || Derived->getPointerOperand() != Base)
1113 continue;
1114
1115 SmallVector<Value *, 2> OffsetV;
1116 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1117 continue;
1118
1119 // Create a Builder and replace the target callsite with a gep
1120 assert(RelocatedBase->getNextNode() &&
1121 "Should always have one since it's not a terminator");
1122
1123 // Insert after RelocatedBase
1124 IRBuilder<> Builder(RelocatedBase->getNextNode());
1125 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1126
1127 // If gc_relocate does not match the actual type, cast it to the right type.
1128 // In theory, there must be a bitcast after gc_relocate if the type does not
1129 // match, and we should reuse it to get the derived pointer. But it could be
1130 // cases like this:
1131 // bb1:
1132 // ...
1133 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1134 // br label %merge
1135 //
1136 // bb2:
1137 // ...
1138 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1139 // br label %merge
1140 //
1141 // merge:
1142 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
1144 //
    // In this case, we can no longer find the bitcast, so we insert a new
    // bitcast whether one already exists or not. This way we handle all cases,
    // and the extra bitcast should be optimized away in later passes.
1148 Value *ActualRelocatedBase = RelocatedBase;
1149 if (RelocatedBase->getType() != Base->getType()) {
1150 ActualRelocatedBase =
1151 Builder.CreateBitCast(RelocatedBase, Base->getType());
1152 }
1153 Value *Replacement = Builder.CreateGEP(
1154 Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
1155 Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
1158 Value *ActualReplacement = Replacement;
1159 if (Replacement->getType() != ToReplace->getType()) {
1160 ActualReplacement =
1161 Builder.CreateBitCast(Replacement, ToReplace->getType());
1162 }
1163 ToReplace->replaceAllUsesWith(ActualReplacement);
1164 ToReplace->eraseFromParent();
1165
1166 MadeChange = true;
1167 }
1168 return MadeChange;
1169}
1170
1171// Turns this:
1172//
1173// %base = ...
1174// %ptr = gep %base + 15
1175// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1176// %base' = relocate(%tok, i32 4, i32 4)
1177// %ptr' = relocate(%tok, i32 4, i32 5)
1178// %val = load %ptr'
1179//
1180// into this:
1181//
1182// %base = ...
1183// %ptr = gep %base + 15
1184// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1185// %base' = gc.relocate(%tok, i32 4, i32 4)
1186// %ptr' = gep %base' + 15
1187// %val = load %ptr'
1188bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1189 bool MadeChange = false;
1190 SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1191 for (auto *U : I.users())
1192 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1193 // Collect all the relocate calls associated with a statepoint
1194 AllRelocateCalls.push_back(Relocate);
1195
1196 // We need at least one base pointer relocation + one derived pointer
1197 // relocation to mangle
1198 if (AllRelocateCalls.size() < 2)
1199 return false;
1200
1201 // RelocateInstMap is a mapping from the base relocate instruction to the
1202 // corresponding derived relocate instructions
1203 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1204 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1205 if (RelocateInstMap.empty())
1206 return false;
1207
1208 for (auto &Item : RelocateInstMap)
1209 // Item.first is the RelocatedBase to offset against
1210 // Item.second is the vector of Targets to replace
1211 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1212 return MadeChange;
1213}
1214
1215/// Sink the specified cast instruction into its user blocks.
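/// A sketch of the effect (hypothetical IR): given "%c = zext i8 %x to i32" in
/// %def whose only use is in another block %use, a clone of the zext is placed
/// at the start of %use, the use is rewritten to the clone, and the original
/// cast is erased once it has no remaining uses.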
1216static bool SinkCast(CastInst *CI) {
1217 BasicBlock *DefBB = CI->getParent();
1218
1219 /// InsertedCasts - Only insert a cast in each block once.
1220 DenseMap<BasicBlock*, CastInst*> InsertedCasts;
1221
1222 bool MadeChange = false;
1223 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1224 UI != E; ) {
1225 Use &TheUse = UI.getUse();
1226 Instruction *User = cast<Instruction>(*UI);
1227
1228 // Figure out which BB this cast is used in. For PHI's this is the
1229 // appropriate predecessor block.
1230 BasicBlock *UserBB = User->getParent();
1231 if (PHINode *PN = dyn_cast<PHINode>(User)) {
1232 UserBB = PN->getIncomingBlock(TheUse);
1233 }
1234
1235 // Preincrement use iterator so we don't invalidate it.
1236 ++UI;
1237
1238 // The first insertion point of a block containing an EH pad is after the
1239 // pad. If the pad is the user, we cannot sink the cast past the pad.
1240 if (User->isEHPad())
1241 continue;
1242
1243 // If the block selected to receive the cast is an EH pad that does not
1244 // allow non-PHI instructions before the terminator, we can't sink the
1245 // cast.
1246 if (UserBB->getTerminator()->isEHPad())
1247 continue;
1248
1249 // If this user is in the same block as the cast, don't change the cast.
1250 if (UserBB == DefBB) continue;
1251
1252 // If we have already inserted a cast into this block, use it.
1253 CastInst *&InsertedCast = InsertedCasts[UserBB];
1254
1255 if (!InsertedCast) {
1256 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1257 assert(InsertPt != UserBB->end());
1258 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1259 CI->getType(), "", &*InsertPt);
1260 InsertedCast->setDebugLoc(CI->getDebugLoc());
1261 }
1262
1263 // Replace a use of the cast with a use of the new cast.
1264 TheUse = InsertedCast;
1265 MadeChange = true;
1266 ++NumCastUses;
1267 }
1268
1269 // If we removed all uses, nuke the cast.
1270 if (CI->use_empty()) {
1271 salvageDebugInfo(*CI);
1272 CI->eraseFromParent();
1273 MadeChange = true;
1274 }
1275
1276 return MadeChange;
1277}
1278
1279/// If the specified cast instruction is a noop copy (e.g. it's casting from
1280/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1281/// reduce the number of virtual registers that must be created and coalesced.
1282///
1283/// Return true if any changes are made.
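/// For example (hypothetical target behavior): on a target where both i16 and
/// i32 are promoted to i32, "%t = trunc i32 %x to i16" is a noop copy and is
/// sunk into its user blocks via SinkCast.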
1284static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1285 const DataLayout &DL) {
1286 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
1287 // than sinking only nop casts, but is helpful on some platforms.
1288 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1289 if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1290 ASC->getDestAddressSpace()))
1291 return false;
1292 }
1293
1294 // If this is a noop copy,
1295 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1296 EVT DstVT = TLI.getValueType(DL, CI->getType());
1297
1298 // This is an fp<->int conversion?
1299 if (SrcVT.isInteger() != DstVT.isInteger())
1300 return false;
1301
1302 // If this is an extension, it will be a zero or sign extension, which
1303 // isn't a noop.
1304 if (SrcVT.bitsLT(DstVT)) return false;
1305
1306 // If these values will be promoted, find out what they will be promoted
1307 // to. This helps us consider truncates on PPC as noop copies when they
1308 // are.
1309 if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1310 TargetLowering::TypePromoteInteger)
1311 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1312 if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1313 TargetLowering::TypePromoteInteger)
1314 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1315
1316 // If, after promotion, these are the same types, this is a noop copy.
1317 if (SrcVT != DstVT)
1318 return false;
1319
1320 return SinkCast(CI);
1321}
1322
1323// Match a simple increment by constant operation. Note that if a sub is
1324// matched, the step is negated (as if the step had been canonicalized to
1325// an add, even though we leave the instruction alone.)
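// For example (hypothetical IR), "%inc = sub i32 %x, 4" matches with LHS = %x
// and Step = -4, mirroring the canonical "add %x, -4" form.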
static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
                           Constant *&Step) {
1328 if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1329 match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1330 m_Instruction(LHS), m_Constant(Step)))))
1331 return true;
1332 if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1333 match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1334 m_Instruction(LHS), m_Constant(Step))))) {
1335 Step = ConstantExpr::getNeg(Step);
1336 return true;
1337 }
1338 return false;
1339}
1340
/// If given \p PN is an induction variable with value IVInc coming from the
1342/// backedge, and on each iteration it gets increased by Step, return pair
1343/// <IVInc, Step>. Otherwise, return None.
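/// A sketch of the matched shape (hypothetical loop): for a header PHI
///   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
/// with "%iv.next = add i64 %iv, 1" in the latch, this returns <%iv.next, 1>.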
1344static Optional<std::pair<Instruction *, Constant *> >
1345getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1346 const Loop *L = LI->getLoopFor(PN->getParent());
1347 if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1348 return None;
1349 auto *IVInc =
1350 dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1351 if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1352 return None;
1353 Instruction *LHS = nullptr;
1354 Constant *Step = nullptr;
1355 if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1356 return std::make_pair(IVInc, Step);
1357 return None;
1358}
1359
1360static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1361 auto *I = dyn_cast<Instruction>(V);
1362 if (!I)
1363 return false;
1364 Instruction *LHS = nullptr;
1365 Constant *Step = nullptr;
1366 if (!matchIncrement(I, LHS, Step))
1367 return false;
1368 if (auto *PN = dyn_cast<PHINode>(LHS))
1369 if (auto IVInc = getIVIncrement(PN, LI))
1370 return IVInc->first == I;
1371 return false;
1372}
1373
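// A sketch of what this rewrite produces (hypothetical IR): for a matched
// "%s = sub i32 %x, %y" and "%c = icmp ult i32 %x, %y" with
// IID = usub_with_overflow, both are replaced by
//   %m = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
// with the math result and the overflow bit extracted from %m.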
1374bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1375 Value *Arg0, Value *Arg1,
1376 CmpInst *Cmp,
1377 Intrinsic::ID IID) {
1378 auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1379 if (!isIVIncrement(BO, LI))
1380 return false;
1381 const Loop *L = LI->getLoopFor(BO->getParent());
1382 assert(L && "L should not be null after isIVIncrement()");
    // Do not risk moving the increment into a child loop.
1384 if (LI->getLoopFor(Cmp->getParent()) != L)
1385 return false;
1386
1387 // Finally, we need to ensure that the insert point will dominate all
1388 // existing uses of the increment.
1389
1390 auto &DT = getDT(*BO->getParent()->getParent());
1391 if (DT.dominates(Cmp->getParent(), BO->getParent()))
1392 // If we're moving up the dom tree, all uses are trivially dominated.
1393 // (This is the common case for code produced by LSR.)
1394 return true;
1395
1396 // Otherwise, special case the single use in the phi recurrence.
    return BO->hasOneUse() &&
           DT.dominates(Cmp->getParent(), L->getLoopLatch());
1398 };
1399 if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1400 // We used to use a dominator tree here to allow multi-block optimization.
1401 // But that was problematic because:
1402 // 1. It could cause a perf regression by hoisting the math op into the
1403 // critical path.
1404 // 2. It could cause a perf regression by creating a value that was live
1405 // across multiple blocks and increasing register pressure.
1406 // 3. Use of a dominator tree could cause large compile-time regression.
1407 // This is because we recompute the DT on every change in the main CGP
1408 // run-loop. The recomputing is probably unnecessary in many cases, so if
1409 // that was fixed, using a DT here would be ok.
1410 //
    // There is one important special case we still want to handle: if BO is
    // the IV increment. Important properties that make it profitable:
    // - We can speculate the IV increment anywhere in the loop (as long as the
    //   indvar Phi is its only user);
    // - Upon computing Cmp, we effectively compute something equivalent to the
    //   IV increment (even though it looks different in the IR). So moving it
    //   up to the cmp point does not really increase register pressure.
1418 return false;
1419 }
1420
1421 // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1422 if (BO->getOpcode() == Instruction::Add &&
1423 IID == Intrinsic::usub_with_overflow) {
1424 assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1425 Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1426 }
1427
1428 // Insert at the first instruction of the pair.
1429 Instruction *InsertPt = nullptr;
1430 for (Instruction &Iter : *Cmp->getParent()) {
1431 // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1432 // the overflow intrinsic are defined.
1433 if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1434 InsertPt = &Iter;
1435 break;
1436 }
1437 }
1438 assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1439
1440 IRBuilder<> Builder(InsertPt);
1441 Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1442 if (BO->getOpcode() != Instruction::Xor) {
1443 Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1444 BO->replaceAllUsesWith(Math);
1445 } else
1446 assert(BO->hasOneUse() &&
1447 "Patterns with XOr should use the BO only in the compare");
1448 Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1449 Cmp->replaceAllUsesWith(OV);
1450 Cmp->eraseFromParent();
1451 BO->eraseFromParent();
1452 return true;
1453}
1454
1455/// Match special-case patterns that check for unsigned add overflow.
1456static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1457 BinaryOperator *&Add) {
1458 // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1459 // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1460 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1461
1462 // We are not expecting non-canonical/degenerate code. Just bail out.
1463 if (isa<Constant>(A))
1464 return false;
1465
1466 ICmpInst::Predicate Pred = Cmp->getPredicate();
1467 if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1468 B = ConstantInt::get(B->getType(), 1);
1469 else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1470 B = ConstantInt::get(B->getType(), -1);
1471 else
1472 return false;
1473
1474 // Check the users of the variable operand of the compare looking for an add
1475 // with the adjusted constant.
1476 for (User *U : A->users()) {
1477 if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1478 Add = cast<BinaryOperator>(U);
1479 return true;
1480 }
1481 }
1482 return false;
1483}
1484
1485/// Try to combine the compare into a call to the llvm.uadd.with.overflow
1486/// intrinsic. Return true if any changes were made.
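/// An illustrative sketch (not from an actual test):
///   %add = add i32 %a, %b
///   %cmp = icmp ult i32 %add, %a      ; overflow check
/// ==>
///   %u    = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %math = extractvalue { i32, i1 } %u, 0
///   %ov   = extractvalue { i32, i1 } %u, 1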
1487bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1488 bool &ModifiedDT) {
1489 Value *A, *B;
1490 BinaryOperator *Add;
1491 if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1492 if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1493 return false;
    // Set A and B from the add matched above.
1495 A = Add->getOperand(0);
1496 B = Add->getOperand(1);
1497 }
1498
1499 if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1500 TLI->getValueType(*DL, Add->getType()),
1501 Add->hasNUsesOrMore(2)))
1502 return false;
1503
1504 // We don't want to move around uses of condition values this late, so we
1505 // check if it is legal to create the call to the intrinsic in the basic
1506 // block containing the icmp.
1507 if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1508 return false;
1509
1510 if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1511 Intrinsic::uadd_with_overflow))
1512 return false;
1513
1514 // Reset callers - do not crash by iterating over a dead instruction.
1515 ModifiedDT = true;
1516 return true;
1517}
1518
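/// Try to combine the compare into a call to the llvm.usub.with.overflow
/// intrinsic. For example (illustrative), a 'sub %a, %b' paired with an
/// 'icmp ult %a, %b' becomes a usub.with.overflow call whose two extracted
/// results replace the sub and the compare. Return true if any changes were
/// made.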
1519bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1520 bool &ModifiedDT) {
1521 // We are not expecting non-canonical/degenerate code. Just bail out.
1522 Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1523 if (isa<Constant>(A) && isa<Constant>(B))
1524 return false;
1525
1526 // Convert (A u> B) to (A u< B) to simplify pattern matching.
1527 ICmpInst::Predicate Pred = Cmp->getPredicate();
1528 if (Pred == ICmpInst::ICMP_UGT) {
1529 std::swap(A, B);
1530 Pred = ICmpInst::ICMP_ULT;
1531 }
1532 // Convert special-case: (A == 0) is the same as (A u< 1).
1533 if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1534 B = ConstantInt::get(B->getType(), 1);
1535 Pred = ICmpInst::ICMP_ULT;
1536 }
1537 // Convert special-case: (A != 0) is the same as (0 u< A).
1538 if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1539 std::swap(A, B);
1540 Pred = ICmpInst::ICMP_ULT;
1541 }
1542 if (Pred != ICmpInst::ICMP_ULT)
1543 return false;
1544
  // Walk the users of the variable operand of the compare, looking for a
  // subtract or add with that same operand. Also match the second operand of
  // the compare against the add/sub; for an add, that operand may instead be
  // a negated constant.
1548 Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1549 BinaryOperator *Sub = nullptr;
1550 for (User *U : CmpVariableOperand->users()) {
1551 // A - B, A u< B --> usubo(A, B)
1552 if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1553 Sub = cast<BinaryOperator>(U);
1554 break;
1555 }
1556
1557 // A + (-C), A u< C (canonicalized form of (sub A, C))
1558 const APInt *CmpC, *AddC;
1559 if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1560 match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1561 Sub = cast<BinaryOperator>(U);
1562 break;
1563 }
1564 }
1565 if (!Sub)
1566 return false;
1567
1568 if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1569 TLI->getValueType(*DL, Sub->getType()),
1570 Sub->hasNUsesOrMore(2)))
1571 return false;
1572
1573 if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1574 Cmp, Intrinsic::usub_with_overflow))
1575 return false;
1576
1577 // Reset callers - do not crash by iterating over a dead instruction.
1578 ModifiedDT = true;
1579 return true;
1580}
1581
1582/// Sink the given CmpInst into user blocks to reduce the number of virtual
1583/// registers that must be created and coalesced. This is a clear win except on
1584/// targets with multiple condition code registers (PowerPC), where it might
1585/// lose; some adjustment may be wanted there.
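///
/// An illustrative sketch (not taken from a test):
///   BB1:
///     %c = icmp ult i32 %a, %b
///   BB2:
///     br i1 %c, label %t, label %f
/// ==>
///   BB2:
///     %c1 = icmp ult i32 %a, %b
///     br i1 %c1, label %t, label %f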
1586///
1587/// Return true if any changes are made.
1588static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1589 if (TLI.hasMultipleConditionRegisters())
1590 return false;
1591
1592 // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1593 if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1594 return false;
1595
1596 // Only insert a cmp in each block once.
1597 DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
1598
1599 bool MadeChange = false;
1600 for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1601 UI != E; ) {
1602 Use &TheUse = UI.getUse();
1603 Instruction *User = cast<Instruction>(*UI);
1604
1605 // Preincrement use iterator so we don't invalidate it.
1606 ++UI;
1607
1608 // Don't bother for PHI nodes.
1609 if (isa<PHINode>(User))
1610 continue;
1611
1612 // Figure out which BB this cmp is used in.
1613 BasicBlock *UserBB = User->getParent();
1614 BasicBlock *DefBB = Cmp->getParent();
1615
1616 // If this user is in the same block as the cmp, don't change the cmp.
1617 if (UserBB == DefBB) continue;
1618
1619 // If we have already inserted a cmp into this block, use it.
1620 CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1621
1622 if (!InsertedCmp) {
1623 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1624 assert(InsertPt != UserBB->end());
1625 InsertedCmp =
1626 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1627 Cmp->getOperand(0), Cmp->getOperand(1), "",
1628 &*InsertPt);
1629 // Propagate the debug info.
1630 InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1631 }
1632
1633 // Replace a use of the cmp with a use of the new cmp.
1634 TheUse = InsertedCmp;
1635 MadeChange = true;
1636 ++NumCmpUses;
1637 }
1638
1639 // If we removed all uses, nuke the cmp.
1640 if (Cmp->use_empty()) {
1641 Cmp->eraseFromParent();
1642 MadeChange = true;
1643 }
1644
1645 return MadeChange;
1646}
1647
1648/// For pattern like:
1649///
1650/// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1651/// ...
1652/// DomBB:
1653/// ...
1654/// br DomCond, TrueBB, CmpBB
1655/// CmpBB: (with DomBB being the single predecessor)
1656/// ...
1657/// Cmp = icmp eq CmpOp0, CmpOp1
1658/// ...
1659///
/// This would require two comparisons on targets where the lowering of
/// icmp sgt/slt differs from the lowering of icmp eq (PowerPC). This function
/// tries to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into
/// 'Cmp = icmp slt/sgt CmpOp0, CmpOp1' so that DomCond and Cmp can share the
/// same comparison, saving one.
1665///
1666/// Return true if any changes are made.
1667static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1668 const TargetLowering &TLI) {
1669 if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1670 return false;
1671
1672 ICmpInst::Predicate Pred = Cmp->getPredicate();
1673 if (Pred != ICmpInst::ICMP_EQ)
1674 return false;
1675
1676 // If icmp eq has users other than BranchInst and SelectInst, converting it to
1677 // icmp slt/sgt would introduce more redundant LLVM IR.
1678 for (User *U : Cmp->users()) {
1679 if (isa<BranchInst>(U))
1680 continue;
1681 if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1682 continue;
1683 return false;
1684 }
1685
1686 // This is a cheap/incomplete check for dominance - just match a single
1687 // predecessor with a conditional branch.
1688 BasicBlock *CmpBB = Cmp->getParent();
1689 BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1690 if (!DomBB)
1691 return false;
1692
1693 // We want to ensure that the only way control gets to the comparison of
1694 // interest is that a less/greater than comparison on the same operands is
1695 // false.
1696 Value *DomCond;
1697 BasicBlock *TrueBB, *FalseBB;
1698 if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1699 return false;
1700 if (CmpBB != FalseBB)
1701 return false;
1702
1703 Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1704 ICmpInst::Predicate DomPred;
1705 if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1706 return false;
1707 if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1708 return false;
1709
1710 // Convert the equality comparison to the opposite of the dominating
1711 // comparison and swap the direction for all branch/select users.
1712 // We have conceptually converted:
1713 // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1714 // to
1715 // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
1716 // And similarly for branches.
1717 for (User *U : Cmp->users()) {
1718 if (auto *BI = dyn_cast<BranchInst>(U)) {
1719 assert(BI->isConditional() && "Must be conditional");
1720 BI->swapSuccessors();
1721 continue;
1722 }
1723 if (auto *SI = dyn_cast<SelectInst>(U)) {
1724 // Swap operands
1725 SI->swapValues();
1726 SI->swapProfMetadata();
1727 continue;
1728 }
1729 llvm_unreachable("Must be a branch or a select");
1730 }
1731 Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1732 return true;
1733}
1734
1735bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) {
1736 if (sinkCmpExpression(Cmp, *TLI))
1737 return true;
1738
1739 if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1740 return true;
1741
1742 if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1743 return true;
1744
1745 if (foldICmpWithDominatingICmp(Cmp, *TLI))
1746 return true;
1747
1748 return false;
1749}
1750
1751/// Duplicate and sink the given 'and' instruction into user blocks where it is
1752/// used in a compare to allow isel to generate better code for targets where
1753/// this operation can be combined.
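///
/// An illustrative sketch (not taken from a test):
///   BB1:
///     %and = and i32 %x, 255
///   BB2:
///     %cmp = icmp eq i32 %and, 0
/// ==>
///   BB2:
///     %and1 = and i32 %x, 255
///     %cmp  = icmp eq i32 %and1, 0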
1754///
1755/// Return true if any changes are made.
1756static bool sinkAndCmp0Expression(Instruction *AndI,
1757 const TargetLowering &TLI,
1758 SetOfInstrs &InsertedInsts) {
1759 // Double-check that we're not trying to optimize an instruction that was
1760 // already optimized by some other part of this pass.
1761 assert(!InsertedInsts.count(AndI) &&
1762 "Attempting to optimize already optimized and instruction");
1763 (void) InsertedInsts;
1764
1765 // Nothing to do for single use in same basic block.
1766 if (AndI->hasOneUse() &&
1767 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1768 return false;
1769
1770 // Try to avoid cases where sinking/duplicating is likely to increase register
1771 // pressure.
1772 if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1773 !isa<ConstantInt>(AndI->getOperand(1)) &&
1774 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1775 return false;
1776
1777 for (auto *U : AndI->users()) {
1778 Instruction *User = cast<Instruction>(U);
1779
1780 // Only sink 'and' feeding icmp with 0.
1781 if (!isa<ICmpInst>(User))
1782 return false;
1783
1784 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1785 if (!CmpC || !CmpC->isZero())
1786 return false;
1787 }
1788
1789 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
1790 return false;
1791
1792 LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
1793 LLVM_DEBUG(AndI->getParent()->dump());
1794
1795 // Push the 'and' into the same block as the icmp 0. There should only be
1796 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
1797 // others, so we don't need to keep track of which BBs we insert into.
1798 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
1799 UI != E; ) {
1800 Use &TheUse = UI.getUse();
1801 Instruction *User = cast<Instruction>(*UI);
1802
1803 // Preincrement use iterator so we don't invalidate it.
1804 ++UI;
1805
1806 LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
1807
1808 // Keep the 'and' in the same place if the use is already in the same block.
1809 Instruction *InsertPt =
1810 User->getParent() == AndI->getParent() ? AndI : User;
1811 Instruction *InsertedAnd =
1812 BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
1813 AndI->getOperand(1), "", InsertPt);
1814 // Propagate the debug info.
1815 InsertedAnd->setDebugLoc(AndI->getDebugLoc());
1816
1817 // Replace a use of the 'and' with a use of the new 'and'.
1818 TheUse = InsertedAnd;
1819 ++NumAndUses;
1820 LLVM_DEBUG(User->getParent()->dump());
1821 }
1822
1823 // We removed all uses, nuke the and.
1824 AndI->eraseFromParent();
1825 return true;
1826}
1827
/// Check whether the candidate use could be combined with a shift instruction
/// to form a bit-extract. Candidates are:
/// 1. A truncate instruction
/// 2. An 'and' instruction whose immediate is a mask of the low bits
///    (e.g. 0xFF), i.e. imm & (imm+1) == 0
1833static bool isExtractBitsCandidateUse(Instruction *User) {
1834 if (!isa<TruncInst>(User)) {
1835 if (User->getOpcode() != Instruction::And ||
1836 !isa<ConstantInt>(User->getOperand(1)))
1837 return false;
1838
1839 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1840
1841 if ((Cimm & (Cimm + 1)).getBoolValue())
1842 return false;
1843 }
1844 return true;
1845}
1846
/// Sink both the shift and the truncate instruction into the BB of the
/// truncate's user.
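/// An illustrative sketch (not taken from a test), assuming the target has no
/// legal i16 compare, so both instructions are re-created in BB2 where isel
/// can combine them:
///   BB1:
///     %shift = lshr i64 %x, 32
///     %trunc = trunc i64 %shift to i16
///   BB2:
///     %cmp = icmp eq i16 %trunc, %y
/// ==>
///   BB2:
///     %shift1 = lshr i64 %x, 32
///     %trunc1 = trunc i64 %shift1 to i16
///     %cmp = icmp eq i16 %trunc1, %y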
1848static bool
1849SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
1850 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1851 const TargetLowering &TLI, const DataLayout &DL) {
1852 BasicBlock *UserBB = User->getParent();
1853 DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1854 auto *TruncI = cast<TruncInst>(User);
1855 bool MadeChange = false;
1856
1857 for (Value::user_iterator TruncUI = TruncI->user_begin(),
1858 TruncE = TruncI->user_end();
1859 TruncUI != TruncE;) {
1860
1861 Use &TruncTheUse = TruncUI.getUse();
1862 Instruction *TruncUser = cast<Instruction>(*TruncUI);
1863 // Preincrement use iterator so we don't invalidate it.
1864
1865 ++TruncUI;
1866
1867 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1868 if (!ISDOpcode)
1869 continue;
1870
1871 // If the use is actually a legal node, there will not be an
1872 // implicit truncate.
1873 // FIXME: always querying the result type is just an
1874 // approximation; some nodes' legality is determined by the
1875 // operand or other means. There's no good way to find out though.
1876 if (TLI.isOperationLegalOrCustom(
1877 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1878 continue;
1879
1880 // Don't bother for PHI nodes.
1881 if (isa<PHINode>(TruncUser))
1882 continue;
1883
1884 BasicBlock *TruncUserBB = TruncUser->getParent();
1885
1886 if (UserBB == TruncUserBB)
1887 continue;
1888
1889 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1890 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1891
1892 if (!InsertedShift && !InsertedTrunc) {
1893 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1894 assert(InsertPt != TruncUserBB->end());
1895 // Sink the shift
1896 if (ShiftI->getOpcode() == Instruction::AShr)
1897 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1898 "", &*InsertPt);
1899 else
1900 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1901 "", &*InsertPt);
1902 InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1903
1904 // Sink the trunc
1905 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1906 TruncInsertPt++;
1907 assert(TruncInsertPt != TruncUserBB->end());
1908
1909 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
1910 TruncI->getType(), "", &*TruncInsertPt);
1911 InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
1912
1913 MadeChange = true;
1914
1915 TruncTheUse = InsertedTrunc;
1916 }
1917 }
1918 return MadeChange;
1919}
1920
/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction to generate a
/// BitExtract instruction. It is only applied if the architecture supports
/// BitExtract instructions. Here is an example:
1925/// BB1:
1926/// %x.extract.shift = lshr i64 %arg1, 32
1927/// BB2:
1928/// %x.extract.trunc = trunc i64 %x.extract.shift to i16
1929/// ==>
1930///
1931/// BB2:
1932/// %x.extract.shift.1 = lshr i64 %arg1, 32
1933/// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
1934///
1935/// CodeGen will recognize the pattern in BB2 and generate BitExtract
1936/// instruction.
1937/// Return true if any changes are made.
1938static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
1939 const TargetLowering &TLI,
1940 const DataLayout &DL) {
1941 BasicBlock *DefBB = ShiftI->getParent();
1942
1943 /// Only insert instructions in each block once.
1944 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
1945
1946 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
1947
1948 bool MadeChange = false;
1949 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
1950 UI != E;) {
1951 Use &TheUse = UI.getUse();
1952 Instruction *User = cast<Instruction>(*UI);
1953 // Preincrement use iterator so we don't invalidate it.
1954 ++UI;
1955
1956 // Don't bother for PHI nodes.
1957 if (isa<PHINode>(User))
1958 continue;
1959
1960 if (!isExtractBitsCandidateUse(User))
1961 continue;
1962
1963 BasicBlock *UserBB = User->getParent();
1964
1965 if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use
      // of the truncate (TruncUse) may still introduce another truncate if
      // it is not legal. In this case, we would like to sink both the shift
      // and the truncate instruction to the BB of TruncUse.
      // For example:
      // BB1:
      //   i64 shift.result = lshr i64 opnd, imm
      //   trunc.result = trunc shift.result to i16
      //
      // BB2:
      //   ----> We will have an implicit truncate here if the architecture
      //         does not have an i16 compare.
      //   cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) && shiftIsLegal &&
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          !TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))
1985 MadeChange =
1986 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
1987
1988 continue;
1989 }
1990 // If we have already inserted a shift into this block, use it.
1991 BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
1992
1993 if (!InsertedShift) {
1994 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1995 assert(InsertPt != UserBB->end());
1996
1997 if (ShiftI->getOpcode() == Instruction::AShr)
1998 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1999 "", &*InsertPt);
2000 else
2001 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
2002 "", &*InsertPt);
2003 InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2004
2005 MadeChange = true;
2006 }
2007
2008 // Replace a use of the shift with a use of the new shift.
2009 TheUse = InsertedShift;
2010 }
2011
2012 // If we removed all uses, or there are none, nuke the shift.
2013 if (ShiftI->use_empty()) {
2014 salvageDebugInfo(*ShiftI);
2015 ShiftI->eraseFromParent();
2016 MadeChange = true;
2017 }
2018
2019 return MadeChange;
2020}
2021
2022/// If counting leading or trailing zeros is an expensive operation and a zero
2023/// input is defined, add a check for zero to avoid calling the intrinsic.
2024///
2025/// We want to transform:
2026/// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2027///
2028/// into:
2029/// entry:
2030/// %cmpz = icmp eq i64 %A, 0
2031/// br i1 %cmpz, label %cond.end, label %cond.false
2032/// cond.false:
2033/// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2034/// br label %cond.end
2035/// cond.end:
2036/// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2037///
2038/// If the transform is performed, return true and set ModifiedDT to true.
2039static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2040 const TargetLowering *TLI,
2041 const DataLayout *DL,
2042 bool &ModifiedDT) {
2043 // If a zero input is undefined, it doesn't make sense to despeculate that.
2044 if (match(CountZeros->getOperand(1), m_One()))
2045 return false;
2046
2047 // If it's cheap to speculate, there's nothing to do.
2048 auto IntrinsicID = CountZeros->getIntrinsicID();
2049 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
2050 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
2051 return false;
2052
2053 // Only handle legal scalar cases. Anything else requires too much work.
2054 Type *Ty = CountZeros->getType();
2055 unsigned SizeInBits = Ty->getScalarSizeInBits();
2056 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2057 return false;
2058
2059 // Bail if the value is never zero.
2060 Use &Op = CountZeros->getOperandUse(0);
2061 if (isKnownNonZero(Op, *DL))
2062 return false;
2063
2064 // The intrinsic will be sunk behind a compare against zero and branch.
2065 BasicBlock *StartBlock = CountZeros->getParent();
2066 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2067
2068 // Create another block after the count zero intrinsic. A PHI will be added
2069 // in this block to select the result of the intrinsic or the bit-width
2070 // constant if the input to the intrinsic is zero.
2071 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
2072 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2073
2074 // Set up a builder to create a compare, conditional branch, and PHI.
2075 IRBuilder<> Builder(CountZeros->getContext());
2076 Builder.SetInsertPoint(StartBlock->getTerminator());
2077 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2078
2079 // Replace the unconditional branch that was created by the first split with
2080 // a compare against zero and a conditional branch.
2081 Value *Zero = Constant::getNullValue(Ty);
2082 // Avoid introducing branch on poison. This also replaces the ctz operand.
2083 if (!isGuaranteedNotToBeUndefOrPoison(Op))
2084 Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2085 Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2086 Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2087 StartBlock->getTerminator()->eraseFromParent();
2088
2089 // Create a PHI in the end block to select either the output of the intrinsic
2090 // or the bit width of the operand.
2091 Builder.SetInsertPoint(&EndBlock->front());
2092 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2093 CountZeros->replaceAllUsesWith(PN);
2094 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2095 PN->addIncoming(BitWidth, StartBlock);
2096 PN->addIncoming(CountZeros, CallBlock);
2097
2098 // We are explicitly handling the zero case, so we can set the intrinsic's
2099 // undefined zero argument to 'true'. This will also prevent reprocessing the
2100 // intrinsic; we only despeculate when a zero input is defined.
2101 CountZeros->setArgOperand(1, Builder.getTrue());
2102 ModifiedDT = true;
2103 return true;
2104}
2105
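/// Try to optimize a call: expand inline asm when the target can lower it,
/// sink address computations for inline asm memory operands, align pointer
/// arguments when profitable, special-case a number of intrinsics, and
/// simplify calls to fortified library functions. Return true if any change
/// was made.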
2106bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
2107 BasicBlock *BB = CI->getParent();
2108
2109 // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
2111 // lower it to normal LLVM code, do so now.
2112 if (CI->isInlineAsm()) {
2113 if (TLI->ExpandInlineAsm(CI)) {
2114 // Avoid invalidating the iterator.
2115 CurInstIterator = BB->begin();
2116 // Avoid processing instructions out of order, which could cause
2117 // reuse before a value is defined.
2118 SunkAddrs.clear();
2119 return true;
2120 }
2121 // Sink address computing for memory operands into the block.
2122 if (optimizeInlineAsmInst(CI))
2123 return true;
2124 }
2125
2126 // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
2128 unsigned MinSize;
2129 Align PrefAlign;
2130 if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2131 for (auto &Arg : CI->args()) {
2132 // We want to align both objects whose address is used directly and
2133 // objects whose address is used in casts and GEPs, though it only makes
2134 // sense for GEPs if the offset is a multiple of the desired alignment and
2135 // if size - offset meets the size threshold.
2136 if (!Arg->getType()->isPointerTy())
2137 continue;
2138 APInt Offset(DL->getIndexSizeInBits(
2139 cast<PointerType>(Arg->getType())->getAddressSpace()),
2140 0);
2141 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2142 uint64_t Offset2 = Offset.getLimitedValue();
2143 if (!isAligned(PrefAlign, Offset2))
2144 continue;
2145 AllocaInst *AI;
2146 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2147 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2148 AI->setAlignment(PrefAlign);
2149 // Global variables can only be aligned if they are defined in this
2150 // object (i.e. they are uniquely initialized in this object), and
2151 // over-aligning global variables that have an explicit section is
2152 // forbidden.
2153 GlobalVariable *GV;
2154 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2155 GV->getPointerAlignment(*DL) < PrefAlign &&
2156 DL->getTypeAllocSize(GV->getValueType()) >=
2157 MinSize + Offset2)
2158 GV->setAlignment(PrefAlign);
2159 }
2160 // If this is a memcpy (or similar) then we may be able to improve the
    // alignment.
2162 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2163 Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2164 MaybeAlign MIDestAlign = MI->getDestAlign();
2165 if (!MIDestAlign || DestAlign > *MIDestAlign)
2166 MI->setDestAlignment(DestAlign);
2167 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2168 MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2169 Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2170 if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2171 MTI->setSourceAlignment(SrcAlign);
2172 }
2173 }
2174 }
2175
2176 // If we have a cold call site, try to sink addressing computation into the
2177 // cold block. This interacts with our handling for loads and stores to
2178 // ensure that we can fold all uses of a potential addressing computation
2179 // into their uses. TODO: generalize this to work over profiling data
2180 if (CI->hasFnAttr(Attribute::Cold) &&
2181 !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2182 for (auto &Arg : CI->args()) {
2183 if (!Arg->getType()->isPointerTy())
2184 continue;
2185 unsigned AS = Arg->getType()->getPointerAddressSpace();
2186 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
2187 }
2188
2189 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2190 if (II) {
2191 switch (II->getIntrinsicID()) {
2192 default: break;
2193 case Intrinsic::assume:
2194 llvm_unreachable("llvm.assume should have been removed already");
2195 case Intrinsic::experimental_widenable_condition: {
      // Give up on future widening opportunities so that we can fold away dead
2197 // paths and merge blocks before going into block-local instruction
2198 // selection.
2199 if (II->use_empty()) {
2200 II->eraseFromParent();
2201 return true;
2202 }
2203 Constant *RetVal = ConstantInt::getTrue(II->getContext());
2204 resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2205 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2206 });
2207 return true;
2208 }
2209 case Intrinsic::objectsize:
2210 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2211 case Intrinsic::is_constant:
2212 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2213 case Intrinsic::aarch64_stlxr:
2214 case Intrinsic::aarch64_stxr: {
2215 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2216 if (!ExtVal || !ExtVal->hasOneUse() ||
2217 ExtVal->getParent() == CI->getParent())
2218 return false;
2219 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2220 ExtVal->moveBefore(CI);
2221 // Mark this instruction as "inserted by CGP", so that other
2222 // optimizations don't touch it.
2223 InsertedInsts.insert(ExtVal);
2224 return true;
2225 }
2226
2227 case Intrinsic::launder_invariant_group:
2228 case Intrinsic::strip_invariant_group: {
2229 Value *ArgVal = II->getArgOperand(0);
2230 auto it = LargeOffsetGEPMap.find(II);
2231 if (it != LargeOffsetGEPMap.end()) {
2232 // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2233 // Make sure not to have to deal with iterator invalidation
2234 // after possibly adding ArgVal to LargeOffsetGEPMap.
2235 auto GEPs = std::move(it->second);
2236 LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2237 LargeOffsetGEPMap.erase(II);
2238 }
2239
2240 II->replaceAllUsesWith(ArgVal);
2241 II->eraseFromParent();
2242 return true;
2243 }
2244 case Intrinsic::cttz:
2245 case Intrinsic::ctlz:
2246 // If counting zeros is expensive, try to avoid it.
2247 return despeculateCountZeros(II, TLI, DL, ModifiedDT);
2248 case Intrinsic::fshl:
2249 case Intrinsic::fshr:
2250 return optimizeFunnelShift(II);
2251 case Intrinsic::dbg_value:
2252 return fixupDbgValue(II);
2253 case Intrinsic::vscale: {
2254 // If datalayout has no special restrictions on vector data layout,
2255 // replace `llvm.vscale` by an equivalent constant expression
2256 // to benefit from cheap constant propagation.
2257 Type *ScalableVectorTy =
2258 VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
2259 if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
2260 auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
2261 auto *One = ConstantInt::getSigned(II->getType(), 1);
2262 auto *CGep =
2263 ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One);
2264 II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType()));
2265 II->eraseFromParent();
2266 return true;
2267 }
2268 break;
2269 }
2270 case Intrinsic::masked_gather:
2271 return optimizeGatherScatterInst(II, II->getArgOperand(0));
2272 case Intrinsic::masked_scatter:
2273 return optimizeGatherScatterInst(II, II->getArgOperand(1));
2274 }
2275
2276 SmallVector<Value *, 2> PtrOps;
2277 Type *AccessTy;
2278 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2279 while (!PtrOps.empty()) {
2280 Value *PtrVal = PtrOps.pop_back_val();
2281 unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2282 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2283 return true;
2284 }
2285 }
2286
2287 // From here on out we're working with named functions.
2288 if (!CI->getCalledFunction()) return false;
2289
2290 // Lower all default uses of _chk calls. This is very similar
2291 // to what InstCombineCalls does, but here we are only lowering calls
2292 // to fortified library functions (e.g. __memcpy_chk) that have the default
2293 // "don't know" as the objectsize. Anything else should be left alone.
2294 FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2295 IRBuilder<> Builder(CI);
2296 if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2297 CI->replaceAllUsesWith(V);
2298 CI->eraseFromParent();
2299 return true;
2300 }
2301
2302 return false;
2303}
2304
2305/// Look for opportunities to duplicate return instructions to the predecessor
2306/// to enable tail call optimizations. The case it is currently looking for is:
2307/// @code
2308/// bb0:
2309/// %tmp0 = tail call i32 @f0()
2310/// br label %return
2311/// bb1:
2312/// %tmp1 = tail call i32 @f1()
2313/// br label %return
2314/// bb2:
2315/// %tmp2 = tail call i32 @f2()
2316/// br label %return
2317/// return:
2318/// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2319/// ret i32 %retval
2320/// @endcode
2321///
2322/// =>
2323///
2324/// @code
2325/// bb0:
2326/// %tmp0 = tail call i32 @f0()
2327/// ret i32 %tmp0
2328/// bb1:
2329/// %tmp1 = tail call i32 @f1()
2330/// ret i32 %tmp1
2331/// bb2:
2332/// %tmp2 = tail call i32 @f2()
2333/// ret i32 %tmp2
2334/// @endcode
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
                                                bool &ModifiedDT) {
2336 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2337 if (!RetI)
2338 return false;
2339
2340 PHINode *PN = nullptr;
2341 ExtractValueInst *EVI = nullptr;
2342 BitCastInst *BCI = nullptr;
2343 Value *V = RetI->getReturnValue();
2344 if (V) {
2345 BCI = dyn_cast<BitCastInst>(V);
2346 if (BCI)
2347 V = BCI->getOperand(0);
2348
2349 EVI = dyn_cast<ExtractValueInst>(V);
2350 if (EVI) {
2351 V = EVI->getOperand(0);
2352 if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2353 return false;
2354 }
2355
2356 PN = dyn_cast<PHINode>(V);
2357 if (!PN)
2358 return false;
2359 }
2360
2361 if (PN && PN->getParent() != BB)
2362 return false;
2363
2364 auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2365 const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2366 if (BC && BC->hasOneUse())
2367 Inst = BC->user_back();
2368
2369 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2370 return II->getIntrinsicID() == Intrinsic::lifetime_end;
2371 return false;
2372 };
2373
2374 // Make sure there are no instructions between the first instruction
2375 // and return.
2376 const Instruction *BI = BB->getFirstNonPHI();
  // Skip over debug intrinsics, pseudo probes, lifetime ends, and the
  // bitcast/extractvalue.
2378 while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2379 isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
2380 BI = BI->getNextNode();
2381 if (BI != RetI)
2382 return false;
2383
2384 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2385 /// call.
2386 const Function *F = BB->getParent();
2387 SmallVector<BasicBlock*, 4> TailCallBBs;
2388 if (PN) {
2389 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2390 // Look through bitcasts.
2391 Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2392 CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2393 BasicBlock *PredBB = PN->getIncomingBlock(I);
2394 // Make sure the phi value is indeed produced by the tail call.
2395 if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2396 TLI->mayBeEmittedAsTailCall(CI) &&
2397 attributesPermitTailCall(F, CI, RetI, *TLI))
2398 TailCallBBs.push_back(PredBB);
2399 }
2400 } else {
2401 SmallPtrSet<BasicBlock*, 4> VisitedBBs;
2402 for (BasicBlock *Pred : predecessors(BB)) {
2403 if (!VisitedBBs.insert(Pred).second)
2404 continue;
2405 if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2406 CallInst *CI = dyn_cast<CallInst>(I);
2407 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2408 attributesPermitTailCall(F, CI, RetI, *TLI))
2409 TailCallBBs.push_back(Pred);
2410 }
2411 }
2412 }
2413
2414 bool Changed = false;
2415 for (auto const &TailCallBB : TailCallBBs) {
2416 // Make sure the call instruction is followed by an unconditional branch to
2417 // the return block.
2418 BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2419 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2420 continue;
2421
2422 // Duplicate the return into TailCallBB.
2423 (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2424 assert(!VerifyBFIUpdates ||
2425 BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2426 BFI->setBlockFreq(
2427 BB,
2428 (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency());
2429 ModifiedDT = Changed = true;
2430 ++NumRetsDup;
2431 }
2432
2433 // If we eliminated all predecessors of the block, delete the block now.
2434 if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
2435 BB->eraseFromParent();
2436
2437 return Changed;
2438}
2439
2440//===----------------------------------------------------------------------===//
2441// Memory Optimization
2442//===----------------------------------------------------------------------===//
2443
2444namespace {
2445
2446/// This is an extended version of TargetLowering::AddrMode
2447/// which holds actual Value*'s for register values.
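///
/// For example (illustrative), an address of the form
///   &GV + 4 + %base + (%idx * 8)
/// would be held as BaseGV = GV, BaseOffs = 4, BaseReg = %base,
/// ScaledReg = %idx and Scale = 8.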
2448struct ExtAddrMode : public TargetLowering::AddrMode {
2449 Value *BaseReg = nullptr;
2450 Value *ScaledReg = nullptr;
2451 Value *OriginalValue = nullptr;
2452 bool InBounds = true;
2453
2454 enum FieldName {
2455 NoField = 0x00,
2456 BaseRegField = 0x01,
2457 BaseGVField = 0x02,
2458 BaseOffsField = 0x04,
2459 ScaledRegField = 0x08,
2460 ScaleField = 0x10,
2461 MultipleFields = 0xff
2462 };
2463
2464
2465 ExtAddrMode() = default;
2466
2467 void print(raw_ostream &OS) const;
2468 void dump() const;
2469
2470 FieldName compare(const ExtAddrMode &other) {
    // First check that the types are the same on each field, as differing
    // types are something we can't cope with later on.
2473 if (BaseReg && other.BaseReg &&
2474 BaseReg->getType() != other.BaseReg->getType())
2475 return MultipleFields;
2476 if (BaseGV && other.BaseGV &&
2477 BaseGV->getType() != other.BaseGV->getType())
2478 return MultipleFields;
2479 if (ScaledReg && other.ScaledReg &&
2480 ScaledReg->getType() != other.ScaledReg->getType())
2481 return MultipleFields;
2482
2483 // Conservatively reject 'inbounds' mismatches.
2484 if (InBounds != other.InBounds)
2485 return MultipleFields;
2486
2487 // Check each field to see if it differs.
2488 unsigned Result = NoField;
2489 if (BaseReg != other.BaseReg)
2490 Result |= BaseRegField;
2491 if (BaseGV != other.BaseGV)
2492 Result |= BaseGVField;
2493 if (BaseOffs != other.BaseOffs)
2494 Result |= BaseOffsField;
2495 if (ScaledReg != other.ScaledReg)
2496 Result |= ScaledRegField;
2497 // Don't count 0 as being a different scale, because that actually means
2498 // unscaled (which will already be counted by having no ScaledReg).
2499 if (Scale && other.Scale && Scale != other.Scale)
2500 Result |= ScaleField;
2501
2502 if (countPopulation(Result) > 1)
2503 return MultipleFields;
2504 else
2505 return static_cast<FieldName>(Result);
2506 }
2507
  // An AddrMode is trivial if it involves no calculation, i.e. it is just a
  // base with no offset.
2510 bool isTrivial() {
2511 // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2512 // trivial if at most one of these terms is nonzero, except that BaseGV and
2513 // BaseReg both being zero actually means a null pointer value, which we
2514 // consider to be 'non-zero' here.
2515 return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2516 }
2517
2518 Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2519 switch (Field) {
2520 default:
2521 return nullptr;
2522 case BaseRegField:
2523 return BaseReg;
2524 case BaseGVField:
2525 return BaseGV;
2526 case ScaledRegField:
2527 return ScaledReg;
2528 case BaseOffsField:
2529 return ConstantInt::get(IntPtrTy, BaseOffs);
2530 }
2531 }
2532
2533 void SetCombinedField(FieldName Field, Value *V,
2534 const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2535 switch (Field) {
2536 default:
2537 llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2538 break;
2539 case ExtAddrMode::BaseRegField:
2540 BaseReg = V;
2541 break;
2542 case ExtAddrMode::BaseGVField:
2543 // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2544 // in the BaseReg field.
2545 assert(BaseReg == nullptr);
2546 BaseReg = V;
2547 BaseGV = nullptr;
2548 break;
2549 case ExtAddrMode::ScaledRegField:
2550 ScaledReg = V;
2551 // If we have a mix of scaled and unscaled addrmodes then we want scale
2552 // to be the scale and not zero.
2553 if (!Scale)
2554 for (const ExtAddrMode &AM : AddrModes)
2555 if (AM.Scale) {
2556 Scale = AM.Scale;
2557 break;
2558 }
2559 break;
2560 case ExtAddrMode::BaseOffsField:
2561 // The offset is no longer a constant, so it goes in ScaledReg with a
2562 // scale of 1.
2563 assert(ScaledReg == nullptr);
2564 ScaledReg = V;
2565 Scale = 1;
2566 BaseOffs = 0;
2567 break;
2568 }
2569 }
2570};
2571
2572#ifndef NDEBUG
2573static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2574 AM.print(OS);
2575 return OS;
2576}
2577#endif
2578