1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
// This family of functions performs various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Utils/Local.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/Analysis/AssumeBundleQueries.h"
26#include "llvm/Analysis/ConstantFolding.h"
27#include "llvm/Analysis/DomTreeUpdater.h"
28#include "llvm/Analysis/InstructionSimplify.h"
29#include "llvm/Analysis/MemoryBuiltins.h"
30#include "llvm/Analysis/MemorySSAUpdater.h"
31#include "llvm/Analysis/TargetLibraryInfo.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/Analysis/VectorUtils.h"
34#include "llvm/BinaryFormat/Dwarf.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
40#include "llvm/IR/ConstantRange.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DIBuilder.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
45#include "llvm/IR/DebugInfoMetadata.h"
46#include "llvm/IR/DebugLoc.h"
47#include "llvm/IR/DerivedTypes.h"
48#include "llvm/IR/Dominators.h"
49#include "llvm/IR/EHPersonalities.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/GetElementPtrTypeIterator.h"
52#include "llvm/IR/GlobalObject.h"
53#include "llvm/IR/IRBuilder.h"
54#include "llvm/IR/InstrTypes.h"
55#include "llvm/IR/Instruction.h"
56#include "llvm/IR/Instructions.h"
57#include "llvm/IR/IntrinsicInst.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/IntrinsicsWebAssembly.h"
60#include "llvm/IR/LLVMContext.h"
61#include "llvm/IR/MDBuilder.h"
62#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
63#include "llvm/IR/Metadata.h"
64#include "llvm/IR/Module.h"
65#include "llvm/IR/PatternMatch.h"
66#include "llvm/IR/ProfDataUtils.h"
67#include "llvm/IR/Type.h"
68#include "llvm/IR/Use.h"
69#include "llvm/IR/User.h"
70#include "llvm/IR/Value.h"
71#include "llvm/IR/ValueHandle.h"
72#include "llvm/Support/Casting.h"
73#include "llvm/Support/CommandLine.h"
74#include "llvm/Support/Debug.h"
75#include "llvm/Support/ErrorHandling.h"
76#include "llvm/Support/KnownBits.h"
77#include "llvm/Support/raw_ostream.h"
78#include "llvm/Transforms/Utils/BasicBlockUtils.h"
79#include "llvm/Transforms/Utils/ValueMapper.h"
80#include <algorithm>
81#include <cassert>
82#include <cstdint>
83#include <iterator>
84#include <map>
85#include <optional>
86#include <utility>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
91extern cl::opt<bool> UseNewDbgInfoFormat;
92
93#define DEBUG_TYPE "local"
94
95STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
96STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
97
98static cl::opt<bool> PHICSEDebugHash(
99 "phicse-debug-hash",
100#ifdef EXPENSIVE_CHECKS
101 cl::init(true),
102#else
    cl::init(false),
104#endif
105 cl::Hidden,
106 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
107 "function is well-behaved w.r.t. its isEqual predicate"));
108
109static cl::opt<unsigned> PHICSENumPHISmallSize(
110 "phicse-num-phi-smallsize", cl::init(Val: 32), cl::Hidden,
111 cl::desc(
112 "When the basic block contains not more than this number of PHI nodes, "
        "perform a (faster!) exhaustive search instead of a set-driven one."));
114
115// Max recursion depth for collectBitParts used when detecting bswap and
116// bitreverse idioms.
117static const unsigned BitPartRecursionMaxDepth = 48;
118
119//===----------------------------------------------------------------------===//
120// Local constant propagation.
121//
122
123/// ConstantFoldTerminator - If a terminator instruction is predicated on a
124/// constant value, convert it into an unconditional branch to the constant
125/// destination. This is a nontrivial operation because the successors of this
126/// basic block must have their PHI nodes updated.
/// If DeleteDeadConditions is true, this also calls
/// RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses that this transformation might make
/// dead.
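/// For example, a minimal sketch of the effect when branching on a constant:
///   br i1 true, label %taken, label %untaken
/// becomes
///   br label %taken
/// and %untaken has the corresponding PHI node entries removed.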
130bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
131 const TargetLibraryInfo *TLI,
132 DomTreeUpdater *DTU) {
133 Instruction *T = BB->getTerminator();
134 IRBuilder<> Builder(T);
135
  // Branch - See if we are conditionally jumping on a constant.
137 if (auto *BI = dyn_cast<BranchInst>(Val: T)) {
138 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
139
140 BasicBlock *Dest1 = BI->getSuccessor(i: 0);
141 BasicBlock *Dest2 = BI->getSuccessor(i: 1);
142
143 if (Dest2 == Dest1) { // Conditional branch to same location?
144 // This branch matches something like this:
145 // br bool %cond, label %Dest, label %Dest
146 // and changes it into: br label %Dest
147
148 // Let the basic block know that we are letting go of one copy of it.
149 assert(BI->getParent() && "Terminator not inserted in block!");
150 Dest1->removePredecessor(Pred: BI->getParent());
151
152 // Replace the conditional branch with an unconditional one.
153 BranchInst *NewBI = Builder.CreateBr(Dest: Dest1);
154
155 // Transfer the metadata to the new branch instruction.
156 NewBI->copyMetadata(SrcInst: *BI, WL: {LLVMContext::MD_loop, LLVMContext::MD_dbg,
157 LLVMContext::MD_annotation});
158
159 Value *Cond = BI->getCondition();
160 BI->eraseFromParent();
161 if (DeleteDeadConditions)
162 RecursivelyDeleteTriviallyDeadInstructions(V: Cond, TLI);
163 return true;
164 }
165
166 if (auto *Cond = dyn_cast<ConstantInt>(Val: BI->getCondition())) {
      // Are we branching on a constant?
      // YES. Change to an unconditional branch...
169 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
170 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
171
      // Let the basic block know that we are letting go of it. Based on this,
      // it will adjust its PHI nodes.
174 OldDest->removePredecessor(Pred: BB);
175
176 // Replace the conditional branch with an unconditional one.
177 BranchInst *NewBI = Builder.CreateBr(Dest: Destination);
178
179 // Transfer the metadata to the new branch instruction.
180 NewBI->copyMetadata(SrcInst: *BI, WL: {LLVMContext::MD_loop, LLVMContext::MD_dbg,
181 LLVMContext::MD_annotation});
182
183 BI->eraseFromParent();
184 if (DTU)
185 DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, OldDest}});
186 return true;
187 }
188
189 return false;
190 }
191
192 if (auto *SI = dyn_cast<SwitchInst>(Val: T)) {
193 // If we are switching on a constant, we can convert the switch to an
194 // unconditional branch.
195 auto *CI = dyn_cast<ConstantInt>(Val: SI->getCondition());
196 BasicBlock *DefaultDest = SI->getDefaultDest();
197 BasicBlock *TheOnlyDest = DefaultDest;
198
199 // If the default is unreachable, ignore it when searching for TheOnlyDest.
200 if (isa<UnreachableInst>(Val: DefaultDest->getFirstNonPHIOrDbg()) &&
201 SI->getNumCases() > 0) {
202 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
203 }
204
205 bool Changed = false;
206
207 // Figure out which case it goes to.
208 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
209 // Found case matching a constant operand?
210 if (It->getCaseValue() == CI) {
211 TheOnlyDest = It->getCaseSuccessor();
212 break;
213 }
214
215 // Check to see if this branch is going to the same place as the default
216 // dest. If so, eliminate it as an explicit compare.
217 if (It->getCaseSuccessor() == DefaultDest) {
218 MDNode *MD = getValidBranchWeightMDNode(I: *SI);
219 unsigned NCases = SI->getNumCases();
220 // Fold the case metadata into the default if there will be any branches
221 // left, unless the metadata doesn't match the switch.
222 if (NCases > 1 && MD) {
223 // Collect branch weights into a vector.
224 SmallVector<uint32_t, 8> Weights;
225 extractBranchWeights(ProfileData: MD, Weights);
226
227 // Merge weight of this case to the default weight.
228 unsigned Idx = It->getCaseIndex();
229 // TODO: Add overflow check.
230 Weights[0] += Weights[Idx + 1];
231 // Remove weight for this case.
232 std::swap(a&: Weights[Idx + 1], b&: Weights.back());
233 Weights.pop_back();
234 setBranchWeights(I&: *SI, Weights);
235 }
236 // Remove this entry.
237 BasicBlock *ParentBB = SI->getParent();
238 DefaultDest->removePredecessor(Pred: ParentBB);
239 It = SI->removeCase(I: It);
240 End = SI->case_end();
241
242 // Removing this case may have made the condition constant. In that
243 // case, update CI and restart iteration through the cases.
244 if (auto *NewCI = dyn_cast<ConstantInt>(Val: SI->getCondition())) {
245 CI = NewCI;
246 It = SI->case_begin();
247 }
248
249 Changed = true;
250 continue;
251 }
252
253 // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
255 // destinations.
256 if (It->getCaseSuccessor() != TheOnlyDest)
257 TheOnlyDest = nullptr;
258
259 // Increment this iterator as we haven't removed the case.
260 ++It;
261 }
262
263 if (CI && !TheOnlyDest) {
      // We are branching on a constant, but it matches none of the cases; go
      // to the default successor.
266 TheOnlyDest = SI->getDefaultDest();
267 }
268
269 // If we found a single destination that we can fold the switch into, do so
270 // now.
271 if (TheOnlyDest) {
272 // Insert the new branch.
273 Builder.CreateBr(Dest: TheOnlyDest);
274 BasicBlock *BB = SI->getParent();
275
276 SmallSet<BasicBlock *, 8> RemovedSuccessors;
277
278 // Remove entries from PHI nodes which we no longer branch to...
279 BasicBlock *SuccToKeep = TheOnlyDest;
280 for (BasicBlock *Succ : successors(I: SI)) {
281 if (DTU && Succ != TheOnlyDest)
282 RemovedSuccessors.insert(Ptr: Succ);
283 // Found case matching a constant operand?
284 if (Succ == SuccToKeep) {
285 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
286 } else {
287 Succ->removePredecessor(Pred: BB);
288 }
289 }
290
291 // Delete the old switch.
292 Value *Cond = SI->getCondition();
293 SI->eraseFromParent();
294 if (DeleteDeadConditions)
295 RecursivelyDeleteTriviallyDeadInstructions(V: Cond, TLI);
296 if (DTU) {
297 std::vector<DominatorTree::UpdateType> Updates;
298 Updates.reserve(n: RemovedSuccessors.size());
299 for (auto *RemovedSuccessor : RemovedSuccessors)
300 Updates.push_back(x: {DominatorTree::Delete, BB, RemovedSuccessor});
301 DTU->applyUpdates(Updates);
302 }
303 return true;
304 }
305
306 if (SI->getNumCases() == 1) {
307 // Otherwise, we can fold this switch into a conditional branch
308 // instruction if it has only one non-default destination.
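      // E.g., a sketch of the rewrite with illustrative values:
      //   switch i32 %x, label %default [ i32 7, label %case ]
      // becomes
      //   %cond = icmp eq i32 %x, 7
      //   br i1 %cond, label %case, label %default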
309 auto FirstCase = *SI->case_begin();
310 Value *Cond = Builder.CreateICmpEQ(LHS: SI->getCondition(),
311 RHS: FirstCase.getCaseValue(), Name: "cond");
312
313 // Insert the new branch.
314 BranchInst *NewBr = Builder.CreateCondBr(Cond,
315 True: FirstCase.getCaseSuccessor(),
316 False: SI->getDefaultDest());
317 SmallVector<uint32_t> Weights;
318 if (extractBranchWeights(I: *SI, Weights) && Weights.size() == 2) {
319 uint32_t DefWeight = Weights[0];
320 uint32_t CaseWeight = Weights[1];
321 // The TrueWeight should be the weight for the single case of SI.
322 NewBr->setMetadata(KindID: LLVMContext::MD_prof,
323 Node: MDBuilder(BB->getContext())
324 .createBranchWeights(TrueWeight: CaseWeight, FalseWeight: DefWeight));
325 }
326
327 // Update make.implicit metadata to the newly-created conditional branch.
328 MDNode *MakeImplicitMD = SI->getMetadata(KindID: LLVMContext::MD_make_implicit);
329 if (MakeImplicitMD)
330 NewBr->setMetadata(KindID: LLVMContext::MD_make_implicit, Node: MakeImplicitMD);
331
332 // Delete the old switch.
333 SI->eraseFromParent();
334 return true;
335 }
336 return Changed;
337 }
338
339 if (auto *IBI = dyn_cast<IndirectBrInst>(Val: T)) {
340 // indirectbr blockaddress(@F, @BB) -> br label @BB
341 if (auto *BA =
342 dyn_cast<BlockAddress>(Val: IBI->getAddress()->stripPointerCasts())) {
343 BasicBlock *TheOnlyDest = BA->getBasicBlock();
344 SmallSet<BasicBlock *, 8> RemovedSuccessors;
345
346 // Insert the new branch.
347 Builder.CreateBr(Dest: TheOnlyDest);
348
349 BasicBlock *SuccToKeep = TheOnlyDest;
350 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
351 BasicBlock *DestBB = IBI->getDestination(i);
352 if (DTU && DestBB != TheOnlyDest)
353 RemovedSuccessors.insert(Ptr: DestBB);
354 if (IBI->getDestination(i) == SuccToKeep) {
355 SuccToKeep = nullptr;
356 } else {
357 DestBB->removePredecessor(Pred: BB);
358 }
359 }
360 Value *Address = IBI->getAddress();
361 IBI->eraseFromParent();
362 if (DeleteDeadConditions)
363 // Delete pointer cast instructions.
364 RecursivelyDeleteTriviallyDeadInstructions(V: Address, TLI);
365
366 // Also zap the blockaddress constant if there are no users remaining,
367 // otherwise the destination is still marked as having its address taken.
368 if (BA->use_empty())
369 BA->destroyConstant();
370
371 // If we didn't find our destination in the IBI successor list, then we
372 // have undefined behavior. Replace the unconditional branch with an
373 // 'unreachable' instruction.
374 if (SuccToKeep) {
375 BB->getTerminator()->eraseFromParent();
376 new UnreachableInst(BB->getContext(), BB);
377 }
378
379 if (DTU) {
380 std::vector<DominatorTree::UpdateType> Updates;
381 Updates.reserve(n: RemovedSuccessors.size());
382 for (auto *RemovedSuccessor : RemovedSuccessors)
383 Updates.push_back(x: {DominatorTree::Delete, BB, RemovedSuccessor});
384 DTU->applyUpdates(Updates);
385 }
386 return true;
387 }
388 }
389
390 return false;
391}
392
393//===----------------------------------------------------------------------===//
394// Local dead code elimination.
395//
396
397/// isInstructionTriviallyDead - Return true if the result produced by the
398/// instruction is not used, and the instruction has no side effects.
399///
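/// For example (illustrative): an 'add' whose result has no uses is trivially
/// dead, whereas a store, or a call with side effects, is not.
///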
400bool llvm::isInstructionTriviallyDead(Instruction *I,
401 const TargetLibraryInfo *TLI) {
402 if (!I->use_empty())
403 return false;
404 return wouldInstructionBeTriviallyDead(I, TLI);
405}
406
407bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
408 Instruction *I, const TargetLibraryInfo *TLI) {
  // Instructions that are "markers" and have implied meaning for the code
  // around them (even without explicit uses) are not dead on unused paths.
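  // For example, an unused call to llvm.stacksave still pairs with a later
  // llvm.stackrestore, so it is not considered dead on unused paths.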
411 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I))
412 if (II->getIntrinsicID() == Intrinsic::stacksave ||
413 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
414 II->isLifetimeStartOrEnd())
415 return false;
416 return wouldInstructionBeTriviallyDead(I, TLI);
417}
418
419bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
420 const TargetLibraryInfo *TLI) {
421 if (I->isTerminator())
422 return false;
423
424 // We don't want the landingpad-like instructions removed by anything this
425 // general.
426 if (I->isEHPad())
427 return false;
428
429 // We don't want debug info removed by anything this general.
430 if (isa<DbgVariableIntrinsic>(Val: I))
431 return false;
432
433 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(Val: I)) {
434 if (DLI->getLabel())
435 return false;
436 return true;
437 }
438
439 if (auto *CB = dyn_cast<CallBase>(Val: I))
440 if (isRemovableAlloc(V: CB, TLI))
441 return true;
442
443 if (!I->willReturn()) {
444 auto *II = dyn_cast<IntrinsicInst>(Val: I);
445 if (!II)
446 return false;
447
448 switch (II->getIntrinsicID()) {
449 case Intrinsic::experimental_guard: {
450 // Guards on true are operationally no-ops. In the future we can
451 // consider more sophisticated tradeoffs for guards considering potential
452 // for check widening, but for now we keep things simple.
453 auto *Cond = dyn_cast<ConstantInt>(Val: II->getArgOperand(i: 0));
454 return Cond && Cond->isOne();
455 }
456 // TODO: These intrinsics are not safe to remove, because this may remove
457 // a well-defined trap.
458 case Intrinsic::wasm_trunc_signed:
459 case Intrinsic::wasm_trunc_unsigned:
460 case Intrinsic::ptrauth_auth:
461 case Intrinsic::ptrauth_resign:
462 return true;
463 default:
464 return false;
465 }
466 }
467
468 if (!I->mayHaveSideEffects())
469 return true;
470
471 // Special case intrinsics that "may have side effects" but can be deleted
472 // when dead.
473 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I)) {
474 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
475 if (II->getIntrinsicID() == Intrinsic::stacksave ||
476 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
477 return true;
478
    // These intrinsics declare side effects to prevent code motion, but they
    // are no-ops when they have no users.
481 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
482 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
483 return true;
484
485 if (II->isLifetimeStartOrEnd()) {
486 auto *Arg = II->getArgOperand(i: 1);
      // Lifetime intrinsics are dead when their right-hand operand (the
      // pointer) is undef.
488 if (isa<UndefValue>(Val: Arg))
489 return true;
      // If the right-hand operand is an alloca, global, or argument and its
      // only uses are lifetime intrinsics, then the intrinsics are dead.
492 if (isa<AllocaInst>(Val: Arg) || isa<GlobalValue>(Val: Arg) || isa<Argument>(Val: Arg))
493 return llvm::all_of(Range: Arg->uses(), P: [](Use &Use) {
494 if (IntrinsicInst *IntrinsicUse =
495 dyn_cast<IntrinsicInst>(Val: Use.getUser()))
496 return IntrinsicUse->isLifetimeStartOrEnd();
497 return false;
498 });
499 return false;
500 }
501
502 // Assumptions are dead if their condition is trivially true.
503 if (II->getIntrinsicID() == Intrinsic::assume &&
504 isAssumeWithEmptyBundle(Assume: cast<AssumeInst>(Val: *II))) {
505 if (ConstantInt *Cond = dyn_cast<ConstantInt>(Val: II->getArgOperand(i: 0)))
506 return !Cond->isZero();
507
508 return false;
509 }
510
511 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(Val: I)) {
512 std::optional<fp::ExceptionBehavior> ExBehavior =
513 FPI->getExceptionBehavior();
514 return *ExBehavior != fp::ebStrict;
515 }
516 }
517
518 if (auto *Call = dyn_cast<CallBase>(Val: I)) {
519 if (Value *FreedOp = getFreedOperand(CB: Call, TLI))
520 if (Constant *C = dyn_cast<Constant>(Val: FreedOp))
521 return C->isNullValue() || isa<UndefValue>(Val: C);
522 if (isMathLibCallNoop(Call, TLI))
523 return true;
524 }
525
526 // Non-volatile atomic loads from constants can be removed.
527 if (auto *LI = dyn_cast<LoadInst>(Val: I))
528 if (auto *GV = dyn_cast<GlobalVariable>(
529 Val: LI->getPointerOperand()->stripPointerCasts()))
530 if (!LI->isVolatile() && GV->isConstant())
531 return true;
532
533 return false;
534}
535
536/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
537/// trivially dead instruction, delete it. If that makes any of its operands
538/// trivially dead, delete them too, recursively. Return true if any
539/// instructions were deleted.
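/// A minimal usage sketch (hypothetical caller): after rewriting all uses of
/// an instruction so that it becomes dead, hand it to this helper:
///   I->replaceAllUsesWith(NewV);
///   RecursivelyDeleteTriviallyDeadInstructions(I, &TLI);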
540bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
541 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
542 std::function<void(Value *)> AboutToDeleteCallback) {
543 Instruction *I = dyn_cast<Instruction>(Val: V);
544 if (!I || !isInstructionTriviallyDead(I, TLI))
545 return false;
546
547 SmallVector<WeakTrackingVH, 16> DeadInsts;
548 DeadInsts.push_back(Elt: I);
549 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
550 AboutToDeleteCallback);
551
552 return true;
553}
554
555bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
556 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
557 MemorySSAUpdater *MSSAU,
558 std::function<void(Value *)> AboutToDeleteCallback) {
559 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
560 for (; S != E; ++S) {
561 auto *I = dyn_cast_or_null<Instruction>(Val&: DeadInsts[S]);
562 if (!I || !isInstructionTriviallyDead(I)) {
563 DeadInsts[S] = nullptr;
564 ++Alive;
565 }
566 }
567 if (Alive == E)
568 return false;
569 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
570 AboutToDeleteCallback);
571 return true;
572}
573
574void llvm::RecursivelyDeleteTriviallyDeadInstructions(
575 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
576 MemorySSAUpdater *MSSAU,
577 std::function<void(Value *)> AboutToDeleteCallback) {
578 // Process the dead instruction list until empty.
579 while (!DeadInsts.empty()) {
580 Value *V = DeadInsts.pop_back_val();
581 Instruction *I = cast_or_null<Instruction>(Val: V);
582 if (!I)
583 continue;
584 assert(isInstructionTriviallyDead(I, TLI) &&
585 "Live instruction found in dead worklist!");
586 assert(I->use_empty() && "Instructions with uses are not dead.");
587
588 // Don't lose the debug info while deleting the instructions.
589 salvageDebugInfo(I&: *I);
590
591 if (AboutToDeleteCallback)
592 AboutToDeleteCallback(I);
593
594 // Null out all of the instruction's operands to see if any operand becomes
595 // dead as we go.
596 for (Use &OpU : I->operands()) {
597 Value *OpV = OpU.get();
598 OpU.set(nullptr);
599
600 if (!OpV->use_empty())
601 continue;
602
603 // If the operand is an instruction that became dead as we nulled out the
604 // operand, and if it is 'trivially' dead, delete it in a future loop
605 // iteration.
606 if (Instruction *OpI = dyn_cast<Instruction>(Val: OpV))
607 if (isInstructionTriviallyDead(I: OpI, TLI))
608 DeadInsts.push_back(Elt: OpI);
609 }
610 if (MSSAU)
611 MSSAU->removeMemoryAccess(I);
612
613 I->eraseFromParent();
614 }
615}
616
617bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
618 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
619 SmallVector<DbgVariableRecord *, 1> DPUsers;
620 findDbgUsers(DbgInsts&: DbgUsers, V: I, DbgVariableRecords: &DPUsers);
621 for (auto *DII : DbgUsers)
622 DII->setKillLocation();
623 for (auto *DVR : DPUsers)
624 DVR->setKillLocation();
625 return !DbgUsers.empty() || !DPUsers.empty();
626}
627
628/// areAllUsesEqual - Check whether the uses of a value are all the same.
629/// This is similar to Instruction::hasOneUse() except this will also return
630/// true when there are no uses or multiple uses that all refer to the same
631/// value.
632static bool areAllUsesEqual(Instruction *I) {
633 Value::user_iterator UI = I->user_begin();
634 Value::user_iterator UE = I->user_end();
635 if (UI == UE)
636 return true;
637
638 User *TheUse = *UI;
639 for (++UI; UI != UE; ++UI) {
640 if (*UI != TheUse)
641 return false;
642 }
643 return true;
644}
645
646/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
647/// dead PHI node, due to being a def-use chain of single-use nodes that
648/// either forms a cycle or is terminated by a trivially dead instruction,
649/// delete it. If that makes any of its operands trivially dead, delete them
650/// too, recursively. Return true if a change was made.
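/// For example (sketch): if %a is a phi whose only use is the phi %b, and the
/// only use of %b is as an incoming value of %a, the two phis form a dead
/// cycle and both can be removed.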
651bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
652 const TargetLibraryInfo *TLI,
653 llvm::MemorySSAUpdater *MSSAU) {
654 SmallPtrSet<Instruction*, 4> Visited;
655 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
656 I = cast<Instruction>(Val: *I->user_begin())) {
657 if (I->use_empty())
658 return RecursivelyDeleteTriviallyDeadInstructions(V: I, TLI, MSSAU);
659
660 // If we find an instruction more than once, we're on a cycle that
661 // won't prove fruitful.
662 if (!Visited.insert(Ptr: I).second) {
663 // Break the cycle and delete the instruction and its operands.
664 I->replaceAllUsesWith(V: PoisonValue::get(T: I->getType()));
665 (void)RecursivelyDeleteTriviallyDeadInstructions(V: I, TLI, MSSAU);
666 return true;
667 }
668 }
669 return false;
670}
671
672static bool
673simplifyAndDCEInstruction(Instruction *I,
674 SmallSetVector<Instruction *, 16> &WorkList,
675 const DataLayout &DL,
676 const TargetLibraryInfo *TLI) {
677 if (isInstructionTriviallyDead(I, TLI)) {
678 salvageDebugInfo(I&: *I);
679
680 // Null out all of the instruction's operands to see if any operand becomes
681 // dead as we go.
682 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
683 Value *OpV = I->getOperand(i);
684 I->setOperand(i, Val: nullptr);
685
686 if (!OpV->use_empty() || I == OpV)
687 continue;
688
689 // If the operand is an instruction that became dead as we nulled out the
690 // operand, and if it is 'trivially' dead, delete it in a future loop
691 // iteration.
692 if (Instruction *OpI = dyn_cast<Instruction>(Val: OpV))
693 if (isInstructionTriviallyDead(I: OpI, TLI))
694 WorkList.insert(X: OpI);
695 }
696
697 I->eraseFromParent();
698
699 return true;
700 }
701
702 if (Value *SimpleV = simplifyInstruction(I, Q: DL)) {
703 // Add the users to the worklist. CAREFUL: an instruction can use itself,
704 // in the case of a phi node.
705 for (User *U : I->users()) {
706 if (U != I) {
707 WorkList.insert(X: cast<Instruction>(Val: U));
708 }
709 }
710
711 // Replace the instruction with its simplified value.
712 bool Changed = false;
713 if (!I->use_empty()) {
714 I->replaceAllUsesWith(V: SimpleV);
715 Changed = true;
716 }
717 if (isInstructionTriviallyDead(I, TLI)) {
718 I->eraseFromParent();
719 Changed = true;
720 }
721 return Changed;
722 }
723 return false;
724}
725
726/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
727/// simplify any instructions in it and recursively delete dead instructions.
728///
/// This returns true if it changed the code; note that it can delete
/// instructions in other blocks as well as in this block.
731bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
732 const TargetLibraryInfo *TLI) {
733 bool MadeChange = false;
734 const DataLayout &DL = BB->getModule()->getDataLayout();
735
736#ifndef NDEBUG
737 // In debug builds, ensure that the terminator of the block is never replaced
738 // or deleted by these simplifications. The idea of simplification is that it
739 // cannot introduce new instructions, and there is no way to replace the
740 // terminator of a block without introducing a new instruction.
741 AssertingVH<Instruction> TerminatorVH(&BB->back());
742#endif
743
744 SmallSetVector<Instruction *, 16> WorkList;
745 // Iterate over the original function, only adding insts to the worklist
746 // if they actually need to be revisited. This avoids having to pre-init
747 // the worklist with the entire function's worth of instructions.
748 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(x: BB->end());
749 BI != E;) {
750 assert(!BI->isTerminator());
751 Instruction *I = &*BI;
752 ++BI;
753
754 // We're visiting this instruction now, so make sure it's not in the
755 // worklist from an earlier visit.
756 if (!WorkList.count(key: I))
757 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
758 }
759
760 while (!WorkList.empty()) {
761 Instruction *I = WorkList.pop_back_val();
762 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
763 }
764 return MadeChange;
765}
766
767//===----------------------------------------------------------------------===//
768// Control Flow Graph Restructuring.
769//
770
771void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
772 DomTreeUpdater *DTU) {
773
774 // If BB has single-entry PHI nodes, fold them.
775 while (PHINode *PN = dyn_cast<PHINode>(Val: DestBB->begin())) {
776 Value *NewVal = PN->getIncomingValue(i: 0);
777 // Replace self referencing PHI with poison, it must be dead.
778 if (NewVal == PN) NewVal = PoisonValue::get(T: PN->getType());
779 PN->replaceAllUsesWith(V: NewVal);
780 PN->eraseFromParent();
781 }
782
783 BasicBlock *PredBB = DestBB->getSinglePredecessor();
784 assert(PredBB && "Block doesn't have a single predecessor!");
785
786 bool ReplaceEntryBB = PredBB->isEntryBlock();
787
788 // DTU updates: Collect all the edges that enter
789 // PredBB. These dominator edges will be redirected to DestBB.
790 SmallVector<DominatorTree::UpdateType, 32> Updates;
791
792 if (DTU) {
793 // To avoid processing the same predecessor more than once.
794 SmallPtrSet<BasicBlock *, 2> SeenPreds;
795 Updates.reserve(N: Updates.size() + 2 * pred_size(BB: PredBB) + 1);
796 for (BasicBlock *PredOfPredBB : predecessors(BB: PredBB))
797 // This predecessor of PredBB may already have DestBB as a successor.
798 if (PredOfPredBB != PredBB)
799 if (SeenPreds.insert(Ptr: PredOfPredBB).second)
800 Updates.push_back(Elt: {DominatorTree::Insert, PredOfPredBB, DestBB});
801 SeenPreds.clear();
802 for (BasicBlock *PredOfPredBB : predecessors(BB: PredBB))
803 if (SeenPreds.insert(Ptr: PredOfPredBB).second)
804 Updates.push_back(Elt: {DominatorTree::Delete, PredOfPredBB, PredBB});
805 Updates.push_back(Elt: {DominatorTree::Delete, PredBB, DestBB});
806 }
807
808 // Zap anything that took the address of DestBB. Not doing this will give the
809 // address an invalid value.
810 if (DestBB->hasAddressTaken()) {
811 BlockAddress *BA = BlockAddress::get(BB: DestBB);
812 Constant *Replacement =
813 ConstantInt::get(Ty: Type::getInt32Ty(C&: BA->getContext()), V: 1);
814 BA->replaceAllUsesWith(V: ConstantExpr::getIntToPtr(C: Replacement,
815 Ty: BA->getType()));
816 BA->destroyConstant();
817 }
818
819 // Anything that branched to PredBB now branches to DestBB.
820 PredBB->replaceAllUsesWith(V: DestBB);
821
822 // Splice all the instructions from PredBB to DestBB.
823 PredBB->getTerminator()->eraseFromParent();
824 DestBB->splice(ToIt: DestBB->begin(), FromBB: PredBB);
825 new UnreachableInst(PredBB->getContext(), PredBB);
826
827 // If the PredBB is the entry block of the function, move DestBB up to
828 // become the entry block after we erase PredBB.
829 if (ReplaceEntryBB)
830 DestBB->moveAfter(MovePos: PredBB);
831
832 if (DTU) {
833 assert(PredBB->size() == 1 &&
834 isa<UnreachableInst>(PredBB->getTerminator()) &&
835 "The successor list of PredBB isn't empty before "
836 "applying corresponding DTU updates.");
837 DTU->applyUpdatesPermissive(Updates);
838 DTU->deleteBB(DelBB: PredBB);
839 // Recalculation of DomTree is needed when updating a forward DomTree and
840 // the Entry BB is replaced.
841 if (ReplaceEntryBB && DTU->hasDomTree()) {
842 // The entry block was removed and there is no external interface for
843 // the dominator tree to be notified of this change. In this corner-case
844 // we recalculate the entire tree.
845 DTU->recalculate(F&: *(DestBB->getParent()));
846 }
847 }
848
849 else {
850 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
851 }
852}
853
854/// Return true if we can choose one of these values to use in place of the
855/// other. Note that we will always choose the non-undef value to keep.
856static bool CanMergeValues(Value *First, Value *Second) {
857 return First == Second || isa<UndefValue>(Val: First) || isa<UndefValue>(Val: Second);
858}
859
860/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
861/// branch to Succ, into Succ.
862///
863/// Assumption: Succ is the single successor for BB.
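/// For example (sketch): suppose block %p is a predecessor of both BB and
/// Succ, and Succ contains
///   %s = phi i32 [ %x, %BB ], [ %y, %p ]
/// where %x is itself a phi in BB whose incoming value for %p differs from
/// %y. After the merge, the edge from %p could carry only one of the two
/// values to %s, so the fold is rejected.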
864static bool
865CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
866 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
867 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
868
869 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
870 << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB, and
  // merging is always safe.
873 if (Succ->getSinglePredecessor())
874 return true;
875
876 // Look at all the phi nodes in Succ, to see if they present a conflict when
877 // merging these blocks
878 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(Val: I); ++I) {
879 PHINode *PN = cast<PHINode>(Val&: I);
880
881 // If the incoming value from BB is again a PHINode in
882 // BB which has the same incoming value for *PI as PN does, we can
883 // merge the phi nodes and then the blocks can still be merged
884 PHINode *BBPN = dyn_cast<PHINode>(Val: PN->getIncomingValueForBlock(BB));
885 if (BBPN && BBPN->getParent() == BB) {
886 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
887 BasicBlock *IBB = PN->getIncomingBlock(i: PI);
888 if (BBPreds.count(Ptr: IBB) &&
889 !CanMergeValues(First: BBPN->getIncomingValueForBlock(BB: IBB),
890 Second: PN->getIncomingValue(i: PI))) {
891 LLVM_DEBUG(dbgs()
892 << "Can't fold, phi node " << PN->getName() << " in "
893 << Succ->getName() << " is conflicting with "
894 << BBPN->getName() << " with regard to common predecessor "
895 << IBB->getName() << "\n");
896 return false;
897 }
898 }
899 } else {
900 Value* Val = PN->getIncomingValueForBlock(BB);
901 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
902 // See if the incoming value for the common predecessor is equal to the
903 // one for BB, in which case this phi node will not prevent the merging
904 // of the block.
905 BasicBlock *IBB = PN->getIncomingBlock(i: PI);
906 if (BBPreds.count(Ptr: IBB) &&
907 !CanMergeValues(First: Val, Second: PN->getIncomingValue(i: PI))) {
908 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
909 << " in " << Succ->getName()
910 << " is conflicting with regard to common "
911 << "predecessor " << IBB->getName() << "\n");
912 return false;
913 }
914 }
915 }
916 }
917
918 return true;
919}
920
921using PredBlockVector = SmallVector<BasicBlock *, 16>;
922using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
923
924/// Determines the value to use as the phi node input for a block.
925///
/// Select between \p OldVal and any value that we know flows from \p BB
927/// to a particular phi on the basis of which one (if either) is not
928/// undef. Update IncomingValues based on the selected value.
929///
930/// \param OldVal The value we are considering selecting.
931/// \param BB The block that the value flows in from.
932/// \param IncomingValues A map from block-to-value for other phi inputs
933/// that we have examined.
934///
935/// \returns the selected value.
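///
/// For example (sketch): if \p OldVal is not undef, it is recorded for \p BB
/// and returned; if it is undef but a value was already recorded for \p BB,
/// that value is returned instead so the phi sees a consistent non-undef
/// value.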
936static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
937 IncomingValueMap &IncomingValues) {
938 if (!isa<UndefValue>(Val: OldVal)) {
939 assert((!IncomingValues.count(BB) ||
940 IncomingValues.find(BB)->second == OldVal) &&
941 "Expected OldVal to match incoming value from BB!");
942
943 IncomingValues.insert(KV: std::make_pair(x&: BB, y&: OldVal));
944 return OldVal;
945 }
946
947 IncomingValueMap::const_iterator It = IncomingValues.find(Val: BB);
948 if (It != IncomingValues.end()) return It->second;
949
950 return OldVal;
951}
952
953/// Create a map from block to value for the operands of a
954/// given phi.
955///
956/// Create a map from block to value for each non-undef value flowing
957/// into \p PN.
958///
959/// \param PN The phi we are collecting the map for.
960/// \param IncomingValues [out] The map from block to value for this phi.
961static void gatherIncomingValuesToPhi(PHINode *PN,
962 IncomingValueMap &IncomingValues) {
963 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
964 BasicBlock *BB = PN->getIncomingBlock(i);
965 Value *V = PN->getIncomingValue(i);
966
967 if (!isa<UndefValue>(Val: V))
968 IncomingValues.insert(KV: std::make_pair(x&: BB, y&: V));
969 }
970}
971
972/// Replace the incoming undef values to a phi with the values
973/// from a block-to-value map.
974///
975/// \param PN The phi we are replacing the undefs in.
976/// \param IncomingValues A map from block to value.
977static void replaceUndefValuesInPhi(PHINode *PN,
978 const IncomingValueMap &IncomingValues) {
979 SmallVector<unsigned> TrueUndefOps;
980 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
981 Value *V = PN->getIncomingValue(i);
982
983 if (!isa<UndefValue>(Val: V)) continue;
984
985 BasicBlock *BB = PN->getIncomingBlock(i);
986 IncomingValueMap::const_iterator It = IncomingValues.find(Val: BB);
987
988 // Keep track of undef/poison incoming values. Those must match, so we fix
989 // them up below if needed.
990 // Note: this is conservatively correct, but we could try harder and group
991 // the undef values per incoming basic block.
992 if (It == IncomingValues.end()) {
993 TrueUndefOps.push_back(Elt: i);
994 continue;
995 }
996
997 // There is a defined value for this incoming block, so map this undef
998 // incoming value to the defined value.
999 PN->setIncomingValue(i, V: It->second);
1000 }
1001
1002 // If there are both undef and poison values incoming, then convert those
1003 // values to undef. It is invalid to have different values for the same
1004 // incoming block.
1005 unsigned PoisonCount = count_if(Range&: TrueUndefOps, P: [&](unsigned i) {
1006 return isa<PoisonValue>(Val: PN->getIncomingValue(i));
1007 });
1008 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
1009 for (unsigned i : TrueUndefOps)
1010 PN->setIncomingValue(i, V: UndefValue::get(T: PN->getType()));
1011 }
1012}
1013
// Return true only when BB and Succ share a single common predecessor.
// This only handles cases where BB cannot be merged into Succ but BB's
// predecessors can be redirected to Succ.
1017static bool
1018CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
1019 const SmallPtrSetImpl<BasicBlock *> &BBPreds,
1020 const SmallPtrSetImpl<BasicBlock *> &SuccPreds,
1021 BasicBlock *&CommonPred) {
1022
1023 // There must be phis in BB, otherwise BB will be merged into Succ directly
1024 if (BB->phis().empty() || Succ->phis().empty())
1025 return false;
1026
  // BB must have predecessors that are not shared with Succ and that can be
  // redirected to Succ.
1028 if (!BB->hasNPredecessorsOrMore(N: 2))
1029 return false;
1030
  // Find the single common predecessor of BB and Succ, if any.
1032 for (BasicBlock *SuccPred : SuccPreds) {
1033 if (BBPreds.count(Ptr: SuccPred)) {
1034 if (CommonPred)
1035 return false;
1036 CommonPred = SuccPred;
1037 }
1038 }
1039
1040 return true;
1041}
1042
1043/// Replace a value flowing from a block to a phi with
1044/// potentially multiple instances of that value flowing from the
1045/// block's predecessors to the phi.
1046///
1047/// \param BB The block with the value flowing into the phi.
1048/// \param BBPreds The predecessors of BB.
1049/// \param PN The phi that we are updating.
1050/// \param CommonPred The common predecessor of BB and PN's BasicBlock
1051static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
1052 const PredBlockVector &BBPreds,
1053 PHINode *PN,
1054 BasicBlock *CommonPred) {
1055 Value *OldVal = PN->removeIncomingValue(BB, DeletePHIIfEmpty: false);
1056 assert(OldVal && "No entry in PHI for Pred BB!");
1057
1058 IncomingValueMap IncomingValues;
1059
1060 // We are merging two blocks - BB, and the block containing PN - and
1061 // as a result we need to redirect edges from the predecessors of BB
1062 // to go to the block containing PN, and update PN
1063 // accordingly. Since we allow merging blocks in the case where the
1064 // predecessor and successor blocks both share some predecessors,
1065 // and where some of those common predecessors might have undef
1066 // values flowing into PN, we want to rewrite those values to be
1067 // consistent with the non-undef values.
1068
1069 gatherIncomingValuesToPhi(PN, IncomingValues);
1070
1071 // If this incoming value is one of the PHI nodes in BB, the new entries
1072 // in the PHI node are the entries from the old PHI.
1073 if (isa<PHINode>(Val: OldVal) && cast<PHINode>(Val: OldVal)->getParent() == BB) {
1074 PHINode *OldValPN = cast<PHINode>(Val: OldVal);
1075 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1076 // Note that, since we are merging phi nodes and BB and Succ might
1077 // have common predecessors, we could end up with a phi node with
1078 // identical incoming branches. This will be cleaned up later (and
1079 // will trigger asserts if we try to clean it up now, without also
1080 // simplifying the corresponding conditional branch).
1081 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1082
1083 if (PredBB == CommonPred)
1084 continue;
1085
1086 Value *PredVal = OldValPN->getIncomingValue(i);
1087 Value *Selected =
1088 selectIncomingValueForBlock(OldVal: PredVal, BB: PredBB, IncomingValues);
1089
1090 // And add a new incoming value for this predecessor for the
1091 // newly retargeted branch.
1092 PN->addIncoming(V: Selected, BB: PredBB);
1093 }
1094 if (CommonPred)
1095 PN->addIncoming(V: OldValPN->getIncomingValueForBlock(BB: CommonPred), BB);
1096
1097 } else {
1098 for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1099 // Update existing incoming values in PN for this
1100 // predecessor of BB.
1101 BasicBlock *PredBB = BBPreds[i];
1102
1103 if (PredBB == CommonPred)
1104 continue;
1105
1106 Value *Selected =
1107 selectIncomingValueForBlock(OldVal, BB: PredBB, IncomingValues);
1108
1109 // And add a new incoming value for this predecessor for the
1110 // newly retargeted branch.
1111 PN->addIncoming(V: Selected, BB: PredBB);
1112 }
1113 if (CommonPred)
1114 PN->addIncoming(V: OldVal, BB);
1115 }
1116
1117 replaceUndefValuesInPhi(PN, IncomingValues);
1118}
1119
1120bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1121 DomTreeUpdater *DTU) {
1122 assert(BB != &BB->getParent()->getEntryBlock() &&
1123 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1124
1125 // We can't simplify infinite loops.
1126 BasicBlock *Succ = cast<BranchInst>(Val: BB->getTerminator())->getSuccessor(i: 0);
1127 if (BB == Succ)
1128 return false;
1129
1130 SmallPtrSet<BasicBlock *, 16> BBPreds(pred_begin(BB), pred_end(BB));
1131 SmallPtrSet<BasicBlock *, 16> SuccPreds(pred_begin(BB: Succ), pred_end(BB: Succ));
1132
1133 // The single common predecessor of BB and Succ when BB cannot be killed
1134 BasicBlock *CommonPred = nullptr;
1135
1136 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1137
  // Even if we cannot fold BB into Succ, we may be able to redirect the
  // predecessors of BB to Succ.
1140 bool BBPhisMergeable =
1141 BBKillable ||
1142 CanRedirectPredsOfEmptyBBToSucc(BB, Succ, BBPreds, SuccPreds, CommonPred);
1143
1144 if (!BBKillable && !BBPhisMergeable)
1145 return false;
1146
1147 // Check to see if merging these blocks/phis would cause conflicts for any of
1148 // the phi nodes in BB or Succ. If not, we can safely merge.
1149
1150 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1151 // has uses which will not disappear when the PHI nodes are merged. It is
1152 // possible to handle such cases, but difficult: it requires checking whether
1153 // BB dominates Succ, which is non-trivial to calculate in the case where
1154 // Succ has multiple predecessors. Also, it requires checking whether
1155 // constructing the necessary self-referential PHI node doesn't introduce any
1156 // conflicts; this isn't too difficult, but the previous code for doing this
1157 // was incorrect.
1158 //
1159 // Note that if this check finds a live use, BB dominates Succ, so BB is
1160 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1161 // folding the branch isn't profitable in that case anyway.
1162 if (!Succ->getSinglePredecessor()) {
1163 BasicBlock::iterator BBI = BB->begin();
1164 while (isa<PHINode>(Val: *BBI)) {
1165 for (Use &U : BBI->uses()) {
1166 if (PHINode* PN = dyn_cast<PHINode>(Val: U.getUser())) {
1167 if (PN->getIncomingBlock(U) != BB)
1168 return false;
1169 } else {
1170 return false;
1171 }
1172 }
1173 ++BBI;
1174 }
1175 }
1176
1177 if (BBPhisMergeable && CommonPred)
1178 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1179 << " and " << Succ->getName() << " : "
1180 << CommonPred->getName() << "\n");
1181
  // If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner-loop
  // metadata.
1184 //
1185 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1186 // current status (that loop metadata is implemented as metadata attached to
1187 // the branch instruction in the loop latch block). To quote from review
1188 // comments, "the current representation of loop metadata (using a loop latch
1189 // terminator attachment) is known to be fundamentally broken. Loop latches
1190 // are not uniquely associated with loops (both in that a latch can be part of
1191 // multiple loops and a loop may have multiple latches). Loop headers are. The
1192 // solution to this problem is also known: Add support for basic block
1193 // metadata, and attach loop metadata to the loop header."
1194 //
1195 // Why bail out:
  // In this case, we expect 'BB' to be the latch for the outer loop and
  // 'BB->Pred' to be the latch for the inner loop (see the reason below), so
  // bail out to preserve the inner-loop metadata rather than eliminating 'BB'
  // and attaching its metadata to the inner loop.
1200 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1201 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1202 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1203 // one self-looping basic block, which is contradictory with the assumption.
1204 //
1205 // To illustrate how inner-loop metadata is dropped:
1206 //
1207 // CFG Before
1208 //
  // BB is while.cond.exit, attached with loop metadata md2.
1210 // BB->Pred is for.body, attached with loop metadata md1.
1211 //
1212 // entry
1213 // |
1214 // v
1215 // ---> while.cond -------------> while.end
1216 // | |
1217 // | v
1218 // | while.body
1219 // | |
1220 // | v
1221 // | for.body <---- (md1)
1222 // | | |______|
1223 // | v
1224 // | while.cond.exit (md2)
1225 // | |
1226 // |_______|
1227 //
1228 // CFG After
1229 //
1230 // while.cond1 is the merge of while.cond.exit and while.cond above.
1231 // for.body is attached with md2, and md1 is dropped.
  // If LoopSimplify runs later (as part of a loop pass), it could create
  // dedicated exits for the inner loop (essentially adding `while.cond.exit`
  // back), but it won't see 'md1' and so won't restore it for the inner loop.
1235 //
1236 // entry
1237 // |
1238 // v
1239 // ---> while.cond1 -------------> while.end
1240 // | |
1241 // | v
1242 // | while.body
1243 // | |
1244 // | v
1245 // | for.body <---- (md2)
1246 // |_______| |______|
1247 if (Instruction *TI = BB->getTerminator())
1248 if (TI->hasMetadata(KindID: LLVMContext::MD_loop))
1249 for (BasicBlock *Pred : predecessors(BB))
1250 if (Instruction *PredTI = Pred->getTerminator())
1251 if (PredTI->hasMetadata(KindID: LLVMContext::MD_loop))
1252 return false;
1253
1254 if (BBKillable)
1255 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1256 else if (BBPhisMergeable)
1257 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1258
1259 SmallVector<DominatorTree::UpdateType, 32> Updates;
1260
1261 if (DTU) {
1262 // To avoid processing the same predecessor more than once.
1263 SmallPtrSet<BasicBlock *, 8> SeenPreds;
1264 // All predecessors of BB (except the common predecessor) will be moved to
1265 // Succ.
1266 Updates.reserve(N: Updates.size() + 2 * pred_size(BB) + 1);
1267
1268 for (auto *PredOfBB : predecessors(BB)) {
1269 // Do not modify those common predecessors of BB and Succ
1270 if (!SuccPreds.contains(Ptr: PredOfBB))
1271 if (SeenPreds.insert(Ptr: PredOfBB).second)
1272 Updates.push_back(Elt: {DominatorTree::Insert, PredOfBB, Succ});
1273 }
1274
1275 SeenPreds.clear();
1276
1277 for (auto *PredOfBB : predecessors(BB))
1278 // When BB cannot be killed, do not remove the edge between BB and
1279 // CommonPred.
1280 if (SeenPreds.insert(Ptr: PredOfBB).second && PredOfBB != CommonPred)
1281 Updates.push_back(Elt: {DominatorTree::Delete, PredOfBB, BB});
1282
1283 if (BBKillable)
1284 Updates.push_back(Elt: {DominatorTree::Delete, BB, Succ});
1285 }
1286
1287 if (isa<PHINode>(Val: Succ->begin())) {
1288 // If there is more than one pred of succ, and there are PHI nodes in
1289 // the successor, then we need to add incoming edges for the PHI nodes
1290 //
1291 const PredBlockVector BBPreds(predecessors(BB));
1292
1293 // Loop over all of the PHI nodes in the successor of BB.
1294 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(Val: I); ++I) {
1295 PHINode *PN = cast<PHINode>(Val&: I);
1296 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1297 }
1298 }
1299
1300 if (Succ->getSinglePredecessor()) {
1301 // BB is the only predecessor of Succ, so Succ will end up with exactly
1302 // the same predecessors BB had.
1303 // Copy over any phi, debug or lifetime instruction.
1304 BB->getTerminator()->eraseFromParent();
1305 Succ->splice(ToIt: Succ->getFirstNonPHIIt(), FromBB: BB);
1306 } else {
1307 while (PHINode *PN = dyn_cast<PHINode>(Val: &BB->front())) {
1308 // We explicitly check for such uses for merging phis.
1309 assert(PN->use_empty() && "There shouldn't be any uses here!");
1310 PN->eraseFromParent();
1311 }
1312 }
1313
1314 // If the unconditional branch we replaced contains llvm.loop metadata, we
1315 // add the metadata to the branch instructions in the predecessors.
1316 if (Instruction *TI = BB->getTerminator())
1317 if (MDNode *LoopMD = TI->getMetadata(KindID: LLVMContext::MD_loop))
1318 for (BasicBlock *Pred : predecessors(BB))
1319 Pred->getTerminator()->setMetadata(KindID: LLVMContext::MD_loop, Node: LoopMD);
1320
1321 if (BBKillable) {
1322 // Everything that jumped to BB now goes to Succ.
1323 BB->replaceAllUsesWith(V: Succ);
1324
1325 if (!Succ->hasName())
1326 Succ->takeName(V: BB);
1327
1328 // Clear the successor list of BB to match updates applying to DTU later.
1329 if (BB->getTerminator())
1330 BB->back().eraseFromParent();
1331
1332 new UnreachableInst(BB->getContext(), BB);
1333 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1334 "applying corresponding DTU updates.");
1335 } else if (BBPhisMergeable) {
1336 // Everything except CommonPred that jumped to BB now goes to Succ.
1337 BB->replaceUsesWithIf(New: Succ, ShouldReplace: [BBPreds, CommonPred](Use &U) -> bool {
1338 if (Instruction *UseInst = dyn_cast<Instruction>(Val: U.getUser()))
1339 return UseInst->getParent() != CommonPred &&
1340 BBPreds.contains(Ptr: UseInst->getParent());
1341 return false;
1342 });
1343 }
1344
1345 if (DTU)
1346 DTU->applyUpdates(Updates);
1347
1348 if (BBKillable)
1349 DeleteDeadBlock(BB, DTU);
1350
1351 return true;
1352}
1353
1354static bool
1355EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
1356 SmallPtrSetImpl<PHINode *> &ToRemove) {
1357 // This implementation doesn't currently consider undef operands
1358 // specially. Theoretically, two phis which are identical except for
1359 // one having an undef where the other doesn't could be collapsed.
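  //
  // What does get collapsed is a pair of fully identical phis, e.g. (sketch):
  //   %a = phi i32 [ %x, %p ], [ %y, %q ]
  //   %b = phi i32 [ %x, %p ], [ %y, %q ]
  // Uses of %b are rewritten to %a, and %b is queued in ToRemove.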
1360
1361 bool Changed = false;
1362
1363 // Examine each PHI.
1364 // Note that increment of I must *NOT* be in the iteration_expression, since
1365 // we don't want to immediately advance when we restart from the beginning.
1366 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(Val&: I);) {
1367 ++I;
1368 // Is there an identical PHI node in this basic block?
    // Note that we only look in the upper triangular part of the comparison
    // matrix; we already checked that the PHIs in the lower triangle aren't
    // identical.
1371 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(Val&: J); ++J) {
1372 if (ToRemove.contains(Ptr: DuplicatePN))
1373 continue;
1374 if (!DuplicatePN->isIdenticalToWhenDefined(I: PN))
1375 continue;
1376 // A duplicate. Replace this PHI with the base PHI.
1377 ++NumPHICSEs;
1378 DuplicatePN->replaceAllUsesWith(V: PN);
1379 ToRemove.insert(Ptr: DuplicatePN);
1380 Changed = true;
1381
1382 // The RAUW can change PHIs that we already visited.
1383 I = BB->begin();
1384 break; // Start over from the beginning.
1385 }
1386 }
1387 return Changed;
1388}
1389
1390static bool
1391EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
1392 SmallPtrSetImpl<PHINode *> &ToRemove) {
1393 // This implementation doesn't currently consider undef operands
1394 // specially. Theoretically, two phis which are identical except for
1395 // one having an undef where the other doesn't could be collapsed.
1396
1397 struct PHIDenseMapInfo {
1398 static PHINode *getEmptyKey() {
1399 return DenseMapInfo<PHINode *>::getEmptyKey();
1400 }
1401
1402 static PHINode *getTombstoneKey() {
1403 return DenseMapInfo<PHINode *>::getTombstoneKey();
1404 }
1405
1406 static bool isSentinel(PHINode *PN) {
1407 return PN == getEmptyKey() || PN == getTombstoneKey();
1408 }
1409
1410 // WARNING: this logic must be kept in sync with
1411 // Instruction::isIdenticalToWhenDefined()!
1412 static unsigned getHashValueImpl(PHINode *PN) {
1413 // Compute a hash value on the operands. Instcombine will likely have
1414 // sorted them, which helps expose duplicates, but we have to check all
1415 // the operands to be safe in case instcombine hasn't run.
1416 return static_cast<unsigned>(hash_combine(
1417 args: hash_combine_range(first: PN->value_op_begin(), last: PN->value_op_end()),
1418 args: hash_combine_range(first: PN->block_begin(), last: PN->block_end())));
1419 }
1420
1421 static unsigned getHashValue(PHINode *PN) {
1422#ifndef NDEBUG
1423 // If -phicse-debug-hash was specified, return a constant -- this
1424 // will force all hashing to collide, so we'll exhaustively search
1425 // the table for a match, and the assertion in isEqual will fire if
1426 // there's a bug causing equal keys to hash differently.
1427 if (PHICSEDebugHash)
1428 return 0;
1429#endif
1430 return getHashValueImpl(PN);
1431 }
1432
1433 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1434 if (isSentinel(PN: LHS) || isSentinel(PN: RHS))
1435 return LHS == RHS;
1436 return LHS->isIdenticalTo(I: RHS);
1437 }
1438
1439 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1440 // These comparisons are nontrivial, so assert that equality implies
1441 // hash equality (DenseMap demands this as an invariant).
1442 bool Result = isEqualImpl(LHS, RHS);
1443 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1444 getHashValueImpl(LHS) == getHashValueImpl(RHS));
1445 return Result;
1446 }
1447 };
1448
1449 // Set of unique PHINodes.
1450 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1451 PHISet.reserve(Size: 4 * PHICSENumPHISmallSize);
1452
1453 // Examine each PHI.
1454 bool Changed = false;
1455 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(Val: I++);) {
1456 if (ToRemove.contains(Ptr: PN))
1457 continue;
1458 auto Inserted = PHISet.insert(V: PN);
1459 if (!Inserted.second) {
1460 // A duplicate. Replace this PHI with its duplicate.
1461 ++NumPHICSEs;
1462 PN->replaceAllUsesWith(V: *Inserted.first);
1463 ToRemove.insert(Ptr: PN);
1464 Changed = true;
1465
1466 // The RAUW can change PHIs that we already visited. Start over from the
1467 // beginning.
1468 PHISet.clear();
1469 I = BB->begin();
1470 }
1471 }
1472
1473 return Changed;
1474}
1475
1476bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
1477 SmallPtrSetImpl<PHINode *> &ToRemove) {
1478 if (
1479#ifndef NDEBUG
1480 !PHICSEDebugHash &&
1481#endif
1482 hasNItemsOrLess(C: BB->phis(), N: PHICSENumPHISmallSize))
1483 return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
1484 return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
1485}
1486
1487bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1488 SmallPtrSet<PHINode *, 8> ToRemove;
1489 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1490 for (PHINode *PN : ToRemove)
1491 PN->eraseFromParent();
1492 return Changed;
1493}
1494
1495Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign,
1496 const DataLayout &DL) {
1497 V = V->stripPointerCasts();
1498
1499 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val: V)) {
1500 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1501 // than the current alignment, as the known bits calculation should have
1502 // already taken it into account. However, this is not always the case,
1503 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1504 // doesn't.
1505 Align CurrentAlign = AI->getAlign();
1506 if (PrefAlign <= CurrentAlign)
1507 return CurrentAlign;
1508
1509 // If the preferred alignment is greater than the natural stack alignment
1510 // then don't round up. This avoids dynamic stack realignment.
1511 if (DL.exceedsNaturalStackAlignment(Alignment: PrefAlign))
1512 return CurrentAlign;
1513 AI->setAlignment(PrefAlign);
1514 return PrefAlign;
1515 }
1516
1517 if (auto *GO = dyn_cast<GlobalObject>(Val: V)) {
1518 // TODO: as above, this shouldn't be necessary.
1519 Align CurrentAlign = GO->getPointerAlignment(DL);
1520 if (PrefAlign <= CurrentAlign)
1521 return CurrentAlign;
1522
1523 // If there is a large requested alignment and we can, bump up the alignment
1524 // of the global. If the memory we set aside for the global may not be the
1525 // memory used by the final program then it is impossible for us to reliably
1526 // enforce the preferred alignment.
1527 if (!GO->canIncreaseAlignment())
1528 return CurrentAlign;
1529
1530 if (GO->isThreadLocal()) {
1531 unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1532 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1533 PrefAlign = Align(MaxTLSAlign);
1534 }
1535
1536 GO->setAlignment(PrefAlign);
1537 return PrefAlign;
1538 }
1539
1540 return Align(1);
1541}
1542
1543Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1544 const DataLayout &DL,
1545 const Instruction *CxtI,
1546 AssumptionCache *AC,
1547 const DominatorTree *DT) {
1548 assert(V->getType()->isPointerTy() &&
1549 "getOrEnforceKnownAlignment expects a pointer!");
1550
1551 KnownBits Known = computeKnownBits(V, DL, Depth: 0, AC, CxtI, DT);
1552 unsigned TrailZ = Known.countMinTrailingZeros();
1553
1554 // Avoid trouble with ridiculously large TrailZ values, such as
1555 // those computed from a null pointer.
1556 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1557 TrailZ = std::min(a: TrailZ, b: +Value::MaxAlignmentExponent);
1558
1559 Align Alignment = Align(1ull << std::min(a: Known.getBitWidth() - 1, b: TrailZ));
1560
1561 if (PrefAlign && *PrefAlign > Alignment)
1562 Alignment = std::max(a: Alignment, b: tryEnforceAlignment(V, PrefAlign: *PrefAlign, DL));
1563
1564 // We don't need to make any adjustment.
1565 return Alignment;
1566}
1567
1568///===---------------------------------------------------------------------===//
1569/// Dbg Intrinsic utilities
1570///
1571
1572/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1573static bool PhiHasDebugValue(DILocalVariable *DIVar,
1574 DIExpression *DIExpr,
1575 PHINode *APN) {
1576 // Since we can't guarantee that the original dbg.declare intrinsic
1577 // is removed by LowerDbgDeclare(), we need to make sure that we are
1578 // not inserting the same dbg.value intrinsic over and over.
1579 SmallVector<DbgValueInst *, 1> DbgValues;
1580 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1581 findDbgValues(DbgValues, V: APN, DbgVariableRecords: &DbgVariableRecords);
1582 for (auto *DVI : DbgValues) {
1583 assert(is_contained(DVI->getValues(), APN));
1584 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1585 return true;
1586 }
1587 for (auto *DVR : DbgVariableRecords) {
1588 assert(is_contained(DVR->location_ops(), APN));
1589 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1590 return true;
1591 }
1592 return false;
1593}
1594
1595/// Check if the alloc size of \p ValTy is large enough to cover the variable
1596/// (or fragment of the variable) described by \p DII.
1597///
1598/// This is primarily intended as a helper for the different
1599/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1600/// describes an alloca'd variable, so we need to use the alloc size of the
1601/// value when doing the comparison. E.g. an i1 value will be identified as
1602/// covering an n-bit fragment, if the store size of i1 is at least n bits.
1603static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1604 const DataLayout &DL = DII->getModule()->getDataLayout();
1605 TypeSize ValueSize = DL.getTypeAllocSizeInBits(Ty: ValTy);
1606 if (std::optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits())
1607 return TypeSize::isKnownGE(LHS: ValueSize, RHS: TypeSize::getFixed(ExactSize: *FragmentSize));
1608
1609 // We can't always calculate the size of the DI variable (e.g. if it is a
1610 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1611 // instead.
1612 if (DII->isAddressOfVariable()) {
1613 // DII should have exactly 1 location when it is an address.
1614 assert(DII->getNumVariableLocationOps() == 1 &&
1615 "address of variable must have exactly 1 location operand.");
1616 if (auto *AI =
1617 dyn_cast_or_null<AllocaInst>(Val: DII->getVariableLocationOp(OpIdx: 0))) {
1618 if (std::optional<TypeSize> FragmentSize =
1619 AI->getAllocationSizeInBits(DL)) {
1620 return TypeSize::isKnownGE(LHS: ValueSize, RHS: *FragmentSize);
1621 }
1622 }
1623 }
1624 // Could not determine size of variable. Conservatively return false.
1625 return false;
1626}
1627// RemoveDIs: duplicate implementation of the above, using DbgVariableRecords,
1628// the replacement for dbg.values.
1629static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
1630 const DataLayout &DL = DVR->getModule()->getDataLayout();
1631 TypeSize ValueSize = DL.getTypeAllocSizeInBits(Ty: ValTy);
1632 if (std::optional<uint64_t> FragmentSize = DVR->getFragmentSizeInBits())
1633 return TypeSize::isKnownGE(LHS: ValueSize, RHS: TypeSize::getFixed(ExactSize: *FragmentSize));
1634
1635 // We can't always calculate the size of the DI variable (e.g. if it is a
1636 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1637 // instead.
1638 if (DVR->isAddressOfVariable()) {
1639 // DVR should have exactly 1 location when it is an address.
1640 assert(DVR->getNumVariableLocationOps() == 1 &&
1641 "address of variable must have exactly 1 location operand.");
1642 if (auto *AI =
1643 dyn_cast_or_null<AllocaInst>(Val: DVR->getVariableLocationOp(OpIdx: 0))) {
1644 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1645 return TypeSize::isKnownGE(LHS: ValueSize, RHS: *FragmentSize);
1646 }
1647 }
1648 }
1649 // Could not determine size of variable. Conservatively return false.
1650 return false;
1651}
1652
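/// Emit a dbg.value intrinsic or a DbgVariableRecord (depending on whether the
/// new debug-info format is in use) that describes \p DV for \p DIVar with
/// expression \p DIExpr, inserted before \p Instr.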
1653static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
1654 DILocalVariable *DIVar,
1655 DIExpression *DIExpr,
1656 const DebugLoc &NewLoc,
1657 BasicBlock::iterator Instr) {
1658 if (!UseNewDbgInfoFormat) {
1659 auto DbgVal = Builder.insertDbgValueIntrinsic(Val: DV, VarInfo: DIVar, Expr: DIExpr, DL: NewLoc,
1660 InsertBefore: (Instruction *)nullptr);
1661 DbgVal.get<Instruction *>()->insertBefore(InsertPos: Instr);
1662 } else {
1663 // RemoveDIs: if we're using the new debug-info format, allocate a
1664 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1665 ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV);
1666 DbgVariableRecord *DV =
1667 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1668 Instr->getParent()->insertDbgRecordBefore(DR: DV, Here: Instr);
1669 }
1670}
1671
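/// Variant of insertDbgValueOrDbgVariableRecord that inserts the new debug
/// value after \p Instr rather than before it.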
1672static void insertDbgValueOrDbgVariableRecordAfter(
1673 DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr,
1674 const DebugLoc &NewLoc, BasicBlock::iterator Instr) {
1675 if (!UseNewDbgInfoFormat) {
1676 auto DbgVal = Builder.insertDbgValueIntrinsic(Val: DV, VarInfo: DIVar, Expr: DIExpr, DL: NewLoc,
1677 InsertBefore: (Instruction *)nullptr);
1678 DbgVal.get<Instruction *>()->insertAfter(InsertPos: &*Instr);
1679 } else {
1680 // RemoveDIs: if we're using the new debug-info format, allocate a
1681 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1682 ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV);
1683 DbgVariableRecord *DV =
1684 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1685 Instr->getParent()->insertDbgRecordAfter(DR: DV, I: &*Instr);
1686 }
1687}
1688
1689/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1690/// that has an associated llvm.dbg.declare intrinsic.
1691void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1692 StoreInst *SI, DIBuilder &Builder) {
1693 assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
1694 auto *DIVar = DII->getVariable();
1695 assert(DIVar && "Missing variable");
1696 auto *DIExpr = DII->getExpression();
1697 Value *DV = SI->getValueOperand();
1698
1699 DebugLoc NewLoc = getDebugValueLoc(DII);
1700
1701 // If the alloca describes the variable itself, i.e. the expression in the
1702 // dbg.declare doesn't start with a dereference, we can perform the
1703 // conversion if the value covers the entire fragment of DII.
1704 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1705 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1706 // We conservatively ignore other dereferences, because the following two are
1707 // not equivalent:
1708 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1709 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1710 // The former is adding 2 to the address of the variable, whereas the latter
1711 // is adding 2 to the value of the variable. As such, we insist on just a
1712 // deref expression.
1713 bool CanConvert =
1714 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1715 valueCoversEntireFragment(ValTy: DV->getType(), DII));
1716 if (CanConvert) {
1717 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1718 Instr: SI->getIterator());
1719 return;
1720 }
1721
1722 // FIXME: If storing to a part of the variable described by the dbg.declare,
1723 // then we want to insert a dbg.value for the corresponding fragment.
1724 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
1725 << '\n');
1726 // For now, when there is a store to parts of the variable (but we do not
1727 // know which part) we insert a dbg.value intrinsic to indicate that we
1728 // know nothing about the variable's content.
1729 DV = UndefValue::get(T: DV->getType());
1730 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1731 Instr: SI->getIterator());
1732}
1733
1734/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1735/// that has an associated llvm.dbg.declare intrinsic.
1736void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1737 LoadInst *LI, DIBuilder &Builder) {
1738 auto *DIVar = DII->getVariable();
1739 auto *DIExpr = DII->getExpression();
1740 assert(DIVar && "Missing variable");
1741
1742 if (!valueCoversEntireFragment(ValTy: LI->getType(), DII)) {
1743 // FIXME: If only referring to a part of the variable described by the
1744 // dbg.declare, then we want to insert a dbg.value for the corresponding
1745 // fragment.
1746 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1747 << *DII << '\n');
1748 return;
1749 }
1750
1751 DebugLoc NewLoc = getDebugValueLoc(DII);
1752
1753 // We are now tracking the loaded value instead of the address. In the
1754 // future if multi-location support is added to the IR, it might be
1755 // preferable to keep tracking both the loaded value and the original
1756 // address in case the alloca can not be elided.
1757 insertDbgValueOrDbgVariableRecordAfter(Builder, DV: LI, DIVar, DIExpr, NewLoc,
1758 Instr: LI->getIterator());
1759}
1760
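/// Inserts a dbg.value record before a store to an alloca'd value that has an
/// associated dbg.declare record (DbgVariableRecord form of the conversion).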
1761void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
1762 StoreInst *SI, DIBuilder &Builder) {
1763 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1764 auto *DIVar = DVR->getVariable();
1765 assert(DIVar && "Missing variable");
1766 auto *DIExpr = DVR->getExpression();
1767 Value *DV = SI->getValueOperand();
1768
1769 DebugLoc NewLoc = getDebugValueLoc(DVR);
1770
1771 // If the alloca describes the variable itself, i.e. the expression in the
1772 // dbg.declare doesn't start with a dereference, we can perform the
1773 // conversion if the value covers the entire fragment of DVR.
1774 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1775 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1776 // We conservatively ignore other dereferences, because the following two are
1777 // not equivalent:
1778 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1779 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1780 // The former is adding 2 to the address of the variable, whereas the latter
1781 // is adding 2 to the value of the variable. As such, we insist on just a
1782 // deref expression.
1783 bool CanConvert =
1784 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1785 valueCoversEntireFragment(ValTy: DV->getType(), DVR));
1786 if (CanConvert) {
1787 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1788 Instr: SI->getIterator());
1789 return;
1790 }
1791
1792 // FIXME: If storing to a part of the variable described by the dbg.declare,
1793 // then we want to insert a dbg.value for the corresponding fragment.
1794 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1795 << '\n');
1796 assert(UseNewDbgInfoFormat);
1797
1798 // For now, when there is a store to parts of the variable (but we do not
1799 // know which part) we insert a dbg.value intrinsic to indicate that we
1800 // know nothing about the variable's content.
1801 DV = UndefValue::get(T: DV->getType());
1802 ValueAsMetadata *DVAM = ValueAsMetadata::get(V: DV);
1803 DbgVariableRecord *NewDVR =
1804 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1805 SI->getParent()->insertDbgRecordBefore(DR: NewDVR, Here: SI->getIterator());
1806}
1807
1808/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1809/// llvm.dbg.declare intrinsic.
1810void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1811 PHINode *APN, DIBuilder &Builder) {
1812 auto *DIVar = DII->getVariable();
1813 auto *DIExpr = DII->getExpression();
1814 assert(DIVar && "Missing variable");
1815
1816 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1817 return;
1818
1819 if (!valueCoversEntireFragment(ValTy: APN->getType(), DII)) {
1820 // FIXME: If only referring to a part of the variable described by the
1821 // dbg.declare, then we want to insert a dbg.value for the corresponding
1822 // fragment.
1823 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1824 << *DII << '\n');
1825 return;
1826 }
1827
1828 BasicBlock *BB = APN->getParent();
1829 auto InsertionPt = BB->getFirstInsertionPt();
1830
1831 DebugLoc NewLoc = getDebugValueLoc(DII);
1832
1833 // The block may be a catchswitch block, which does not have a valid
1834 // insertion point.
1835 // FIXME: Insert dbg.value markers in the successors when appropriate.
1836 if (InsertionPt != BB->end()) {
1837 insertDbgValueOrDbgVariableRecord(Builder, DV: APN, DIVar, DIExpr, NewLoc,
1838 Instr: InsertionPt);
1839 }
1840}
1841
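/// Inserts a dbg.value record after a load of an alloca'd value that has an
/// associated dbg.declare record (DbgVariableRecord form of the conversion).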
1842void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
1843 DIBuilder &Builder) {
1844 auto *DIVar = DVR->getVariable();
1845 auto *DIExpr = DVR->getExpression();
1846 assert(DIVar && "Missing variable");
1847
1848 if (!valueCoversEntireFragment(ValTy: LI->getType(), DVR)) {
1849 // FIXME: If only referring to a part of the variable described by the
1850 // dbg.declare, then we want to insert a DbgVariableRecord for the
1851 // corresponding fragment.
1852 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1853 << *DVR << '\n');
1854 return;
1855 }
1856
1857 DebugLoc NewLoc = getDebugValueLoc(DVR);
1858
1859 // We are now tracking the loaded value instead of the address. In the
1860 // future if multi-location support is added to the IR, it might be
1861 // preferable to keep tracking both the loaded value and the original
1862 // address in case the alloca can not be elided.
1863 assert(UseNewDbgInfoFormat);
1864
1865 // Create a DbgVariableRecord directly and insert.
1866 ValueAsMetadata *LIVAM = ValueAsMetadata::get(V: LI);
1867 DbgVariableRecord *DV =
1868 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1869 LI->getParent()->insertDbgRecordAfter(DR: DV, I: LI);
1870}
1871
1872/// Determine whether this alloca is either a VLA or an array.
1873static bool isArray(AllocaInst *AI) {
1874 return AI->isArrayAllocation() ||
1875 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1876}
1877
1878/// Determine whether this alloca is a structure.
1879static bool isStructure(AllocaInst *AI) {
1880 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1881}
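
/// Inserts a dbg.value record after a PHI that has an associated dbg.declare
/// record (DbgVariableRecord form of the conversion).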
1882void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
1883 DIBuilder &Builder) {
1884 auto *DIVar = DVR->getVariable();
1885 auto *DIExpr = DVR->getExpression();
1886 assert(DIVar && "Missing variable");
1887
1888 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1889 return;
1890
1891 if (!valueCoversEntireFragment(ValTy: APN->getType(), DVR)) {
1892 // FIXME: If only referring to a part of the variable described by the
1893 // dbg.declare, then we want to insert a DbgVariableRecord for the
1894 // corresponding fragment.
1895 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1896 << *DVR << '\n');
1897 return;
1898 }
1899
1900 BasicBlock *BB = APN->getParent();
1901 auto InsertionPt = BB->getFirstInsertionPt();
1902
1903 DebugLoc NewLoc = getDebugValueLoc(DVR);
1904
1905 // The block may be a catchswitch block, which does not have a valid
1906 // insertion point.
1907 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1908 if (InsertionPt != BB->end()) {
1909 insertDbgValueOrDbgVariableRecord(Builder, DV: APN, DIVar, DIExpr, NewLoc,
1910 Instr: InsertionPt);
1911 }
1912}
1913
1914/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
1915/// set of llvm.dbg.value intrinsics.
1916bool llvm::LowerDbgDeclare(Function &F) {
1917 bool Changed = false;
1918 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1919 SmallVector<DbgDeclareInst *, 4> Dbgs;
1920 SmallVector<DbgVariableRecord *> DVRs;
1921 for (auto &FI : F) {
1922 for (Instruction &BI : FI) {
1923 if (auto *DDI = dyn_cast<DbgDeclareInst>(Val: &BI))
1924 Dbgs.push_back(Elt: DDI);
1925 for (DbgVariableRecord &DVR : filterDbgVars(R: BI.getDbgRecordRange())) {
1926 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1927 DVRs.push_back(Elt: &DVR);
1928 }
1929 }
1930 }
1931
1932 if (Dbgs.empty() && DVRs.empty())
1933 return Changed;
1934
1935 auto LowerOne = [&](auto *DDI) {
1936 AllocaInst *AI =
1937 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1938 // If this is an alloca for a scalar variable, insert a dbg.value
1939 // at each load and store to the alloca and erase the dbg.declare.
1940 // The dbg.values allow tracking a variable even if it is not
1941 // stored on the stack, while the dbg.declare can only describe
1942 // the stack slot (and at a lexical-scope granularity). Later
1943 // passes will attempt to elide the stack slot.
1944 if (!AI || isArray(AI) || isStructure(AI))
1945 return;
1946
1947 // A volatile load/store means that the alloca can't be elided anyway.
1948 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1949 if (LoadInst *LI = dyn_cast<LoadInst>(Val: U))
1950 return LI->isVolatile();
1951 if (StoreInst *SI = dyn_cast<StoreInst>(Val: U))
1952 return SI->isVolatile();
1953 return false;
1954 }))
1955 return;
1956
1957 SmallVector<const Value *, 8> WorkList;
1958 WorkList.push_back(Elt: AI);
1959 while (!WorkList.empty()) {
1960 const Value *V = WorkList.pop_back_val();
1961 for (const auto &AIUse : V->uses()) {
1962 User *U = AIUse.getUser();
1963 if (StoreInst *SI = dyn_cast<StoreInst>(Val: U)) {
1964 if (AIUse.getOperandNo() == 1)
1965 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1966 } else if (LoadInst *LI = dyn_cast<LoadInst>(Val: U)) {
1967 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1968 } else if (CallInst *CI = dyn_cast<CallInst>(Val: U)) {
1969 // This is a call by-value or some other instruction that takes a
1970 // pointer to the variable. Insert a *value* intrinsic that describes
1971 // the variable by dereferencing the alloca.
1972 if (!CI->isLifetimeStartOrEnd()) {
1973 DebugLoc NewLoc = getDebugValueLoc(DDI);
1974 auto *DerefExpr =
1975 DIExpression::append(Expr: DDI->getExpression(), Ops: dwarf::DW_OP_deref);
1976 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
1977 DerefExpr, NewLoc,
1978 CI->getIterator());
1979 }
1980 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(Val: U)) {
1981 if (BI->getType()->isPointerTy())
1982 WorkList.push_back(Elt: BI);
1983 }
1984 }
1985 }
1986 DDI->eraseFromParent();
1987 Changed = true;
1988 };
1989
1990 for_each(Range&: Dbgs, F: LowerOne);
1991 for_each(Range&: DVRs, F: LowerOne);
1992
1993 if (Changed)
1994 for (BasicBlock &BB : F)
1995 RemoveRedundantDbgInstrs(BB: &BB);
1996
1997 return Changed;
1998}
1999
2000// RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the
2001// debug-info out of the block's DbgVariableRecords rather than dbg.value
2002// intrinsics.
2003static void
2004insertDbgVariableRecordsForPHIs(BasicBlock *BB,
2005 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2006 assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
2007 if (InsertedPHIs.size() == 0)
2008 return;
2009
2010 // Map existing PHI nodes to their DbgVariableRecords.
2011 DenseMap<Value *, DbgVariableRecord *> DbgValueMap;
2012 for (auto &I : *BB) {
2013 for (DbgVariableRecord &DVR : filterDbgVars(R: I.getDbgRecordRange())) {
2014 for (Value *V : DVR.location_ops())
2015 if (auto *Loc = dyn_cast_or_null<PHINode>(Val: V))
2016 DbgValueMap.insert(KV: {Loc, &DVR});
2017 }
2018 }
2019 if (DbgValueMap.size() == 0)
2020 return;
2021
2022 // Map a pair of the destination BB and old DbgVariableRecord to the new
2023 // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
2024 // more than one of the inserted PHIs in the same destination BB, we can
2025 // update the same DbgVariableRecord with all the new PHIs instead of creating
2026 // one copy for each.
2027 MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *>
2028 NewDbgValueMap;
2029 // Then iterate through the new PHIs and look to see if they use one of the
2030 // previously mapped PHIs. If so, create a new DbgVariableRecord that will
2031 // propagate the info through the new PHI. If we use more than one new PHI in
2032 // a single destination BB with the same old dbg.value, merge the updates so
2033 // that we get a single new DbgVariableRecord with all the new PHIs.
2034 for (auto PHI : InsertedPHIs) {
2035 BasicBlock *Parent = PHI->getParent();
2036 // Avoid inserting a debug-info record into an EH block.
2037 if (Parent->getFirstNonPHI()->isEHPad())
2038 continue;
2039 for (auto VI : PHI->operand_values()) {
2040 auto V = DbgValueMap.find(Val: VI);
2041 if (V != DbgValueMap.end()) {
2042 DbgVariableRecord *DbgII = cast<DbgVariableRecord>(Val: V->second);
2043 auto NewDI = NewDbgValueMap.find(Key: {Parent, DbgII});
2044 if (NewDI == NewDbgValueMap.end()) {
2045 DbgVariableRecord *NewDbgII = DbgII->clone();
2046 NewDI = NewDbgValueMap.insert(KV: {{Parent, DbgII}, NewDbgII}).first;
2047 }
2048 DbgVariableRecord *NewDbgII = NewDI->second;
2049 // If PHI contains VI as an operand more than once, we may have already
2050 // replaced it in NewDbgII; confirm that it is still present.
2051 if (is_contained(Range: NewDbgII->location_ops(), Element: VI))
2052 NewDbgII->replaceVariableLocationOp(OldValue: VI, NewValue: PHI);
2053 }
2054 }
2055 }
2056 // Insert the new DbgVariableRecords into their destination blocks.
2057 for (auto DI : NewDbgValueMap) {
2058 BasicBlock *Parent = DI.first.first;
2059 DbgVariableRecord *NewDbgII = DI.second;
2060 auto InsertionPt = Parent->getFirstInsertionPt();
2061 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2062
2063 Parent->insertDbgRecordBefore(DR: NewDbgII, Here: InsertionPt);
2064 }
2065}
2066
2067/// Propagate dbg.value intrinsics through the newly inserted PHIs.
2068void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
2069 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2070 assert(BB && "No BasicBlock to clone dbg.value(s) from.");
2071 if (InsertedPHIs.size() == 0)
2072 return;
2073
2074 insertDbgVariableRecordsForPHIs(BB, InsertedPHIs);
2075
2076 // Map existing PHI nodes to their dbg.values.
2077 ValueToValueMapTy DbgValueMap;
2078 for (auto &I : *BB) {
2079 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(Val: &I)) {
2080 for (Value *V : DbgII->location_ops())
2081 if (auto *Loc = dyn_cast_or_null<PHINode>(Val: V))
2082 DbgValueMap.insert(KV: {Loc, DbgII});
2083 }
2084 }
2085 if (DbgValueMap.size() == 0)
2086 return;
2087
2088 // Map a pair of the destination BB and old dbg.value to the new dbg.value,
2089 // so that if a dbg.value is being rewritten to use more than one of the
2090 // inserted PHIs in the same destination BB, we can update the same dbg.value
2091 // with all the new PHIs instead of creating one copy for each.
2092 MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
2093 DbgVariableIntrinsic *>
2094 NewDbgValueMap;
2095 // Then iterate through the new PHIs and look to see if they use one of the
2096 // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
2097 // propagate the info through the new PHI. If we use more than one new PHI in
2098 // a single destination BB with the same old dbg.value, merge the updates so
2099 // that we get a single new dbg.value with all the new PHIs.
2100 for (auto *PHI : InsertedPHIs) {
2101 BasicBlock *Parent = PHI->getParent();
2102 // Avoid inserting an intrinsic into an EH block.
2103 if (Parent->getFirstNonPHI()->isEHPad())
2104 continue;
2105 for (auto *VI : PHI->operand_values()) {
2106 auto V = DbgValueMap.find(Val: VI);
2107 if (V != DbgValueMap.end()) {
2108 auto *DbgII = cast<DbgVariableIntrinsic>(Val&: V->second);
2109 auto NewDI = NewDbgValueMap.find(Key: {Parent, DbgII});
2110 if (NewDI == NewDbgValueMap.end()) {
2111 auto *NewDbgII = cast<DbgVariableIntrinsic>(Val: DbgII->clone());
2112 NewDI = NewDbgValueMap.insert(KV: {{Parent, DbgII}, NewDbgII}).first;
2113 }
2114 DbgVariableIntrinsic *NewDbgII = NewDI->second;
2115 // If PHI contains VI as an operand more than once, we may have already
2116 // replaced it in NewDbgII; confirm that it is still present.
2117 if (is_contained(Range: NewDbgII->location_ops(), Element: VI))
2118 NewDbgII->replaceVariableLocationOp(OldValue: VI, NewValue: PHI);
2119 }
2120 }
2121 }
2122 // Insert the new dbg.values into their destination blocks.
2123 for (auto DI : NewDbgValueMap) {
2124 BasicBlock *Parent = DI.first.first;
2125 auto *NewDbgII = DI.second;
2126 auto InsertionPt = Parent->getFirstInsertionPt();
2127 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2128 NewDbgII->insertBefore(InsertPos: &*InsertionPt);
2129 }
2130}
2131
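/// Rewrite the dbg.declare intrinsics and records that describe \p Address so
/// that they describe \p NewAddress instead, prepending \p DIExprFlags and
/// \p Offset to their expressions. Returns true if any were updated.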
2132bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
2133 DIBuilder &Builder, uint8_t DIExprFlags,
2134 int Offset) {
2135 TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(V: Address);
2136 TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(V: Address);
2137
2138 auto ReplaceOne = [&](auto *DII) {
2139 assert(DII->getVariable() && "Missing variable");
2140 auto *DIExpr = DII->getExpression();
2141 DIExpr = DIExpression::prepend(Expr: DIExpr, Flags: DIExprFlags, Offset);
2142 DII->setExpression(DIExpr);
2143 DII->replaceVariableLocationOp(Address, NewAddress);
2144 };
2145
2146 for_each(Range&: DbgDeclares, F: ReplaceOne);
2147 for_each(Range&: DVRDeclares, F: ReplaceOne);
2148
2149 return !DbgDeclares.empty() || !DVRDeclares.empty();
2150}
2151
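/// Rewrite a single alloca-based dbg.value intrinsic or DbgVariableRecord to
/// refer to \p NewAddress, inserting \p Offset before the leading DW_OP_deref.
/// Gives up if the expression does not start with a dereference.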
2152static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
2153 DILocalVariable *DIVar,
2154 DIExpression *DIExpr, Value *NewAddress,
2155 DbgValueInst *DVI,
2156 DbgVariableRecord *DVR,
2157 DIBuilder &Builder, int Offset) {
2158 assert(DIVar && "Missing variable");
2159
2160 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
2161 // should do with the alloca pointer is dereference it. Otherwise we don't
2162 // know how to handle it and give up.
2163 if (!DIExpr || DIExpr->getNumElements() < 1 ||
2164 DIExpr->getElement(I: 0) != dwarf::DW_OP_deref)
2165 return;
2166
2167 // Insert the offset before the first deref.
2168 if (Offset)
2169 DIExpr = DIExpression::prepend(Expr: DIExpr, Flags: 0, Offset);
2170
2171 if (DVI) {
2172 DVI->setExpression(DIExpr);
2173 DVI->replaceVariableLocationOp(OpIdx: 0u, NewValue: NewAddress);
2174 } else {
2175 assert(DVR);
2176 DVR->setExpression(DIExpr);
2177 DVR->replaceVariableLocationOp(OpIdx: 0u, NewValue: NewAddress);
2178 }
2179}
2180
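/// Point every dbg.value user of alloca \p AI at \p NewAllocaAddress instead,
/// applying \p Offset to each rewritten expression.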
2181void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
2182 DIBuilder &Builder, int Offset) {
2183 SmallVector<DbgValueInst *, 1> DbgUsers;
2184 SmallVector<DbgVariableRecord *, 1> DPUsers;
2185 findDbgValues(DbgValues&: DbgUsers, V: AI, DbgVariableRecords: &DPUsers);
2186
2187 // Attempt to replace dbg.values that use this alloca.
2188 for (auto *DVI : DbgUsers)
2189 updateOneDbgValueForAlloca(Loc: DVI->getDebugLoc(), DIVar: DVI->getVariable(),
2190 DIExpr: DVI->getExpression(), NewAddress: NewAllocaAddress, DVI,
2191 DVR: nullptr, Builder, Offset);
2192
2193 // Replace any DbgVariableRecords that use this alloca.
2194 for (DbgVariableRecord *DVR : DPUsers)
2195 updateOneDbgValueForAlloca(Loc: DVR->getDebugLoc(), DIVar: DVR->getVariable(),
2196 DIExpr: DVR->getExpression(), NewAddress: NewAllocaAddress, DVI: nullptr,
2197 DVR, Builder, Offset);
2198}
2199
2200/// Salvage debug information for \p I where possible; otherwise mark the
2201/// debug uses of \p I as undef.
2202void llvm::salvageDebugInfo(Instruction &I) {
2203 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2204 SmallVector<DbgVariableRecord *, 1> DPUsers;
2205 findDbgUsers(DbgInsts&: DbgUsers, V: &I, DbgVariableRecords: &DPUsers);
2206 salvageDebugInfoForDbgValues(I, Insns: DbgUsers, DPInsns: DPUsers);
2207}
2208
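/// Salvage the address component of \p Assign (a dbg.assign intrinsic or
/// DbgVariableRecord) whose address is computed by an instruction that is
/// going away. The address is killed if salvaging would require additional
/// location operands.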
2209template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2210 Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2211 // Only instructions can be salvaged at the moment.
2212 if (!I)
2213 return;
2214
2215 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2216 "address-expression shouldn't have fragment info");
2217
2218 // The address component of a dbg.assign cannot be variadic.
2219 uint64_t CurrentLocOps = 0;
2220 SmallVector<Value *, 4> AdditionalValues;
2221 SmallVector<uint64_t, 16> Ops;
2222 Value *NewV = salvageDebugInfoImpl(I&: *I, CurrentLocOps, Ops, AdditionalValues);
2223
2224 // Check if the salvage failed.
2225 if (!NewV)
2226 return;
2227
2228 DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
2229 Expr: Assign->getAddressExpression(), Ops, ArgNo: 0, /*StackValue=*/false);
2230 assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2231 "address-expression shouldn't have fragment info");
2232
2233 // Salvage succeeds if no additional values are required.
2234 if (AdditionalValues.empty()) {
2235 Assign->setAddress(NewV);
2236 Assign->setAddressExpression(SalvagedExpr);
2237 } else {
2238 Assign->setKillAddress();
2239 }
2240}
2241
2242void llvm::salvageDebugInfoForDbgValues(
2243 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers,
2244 ArrayRef<DbgVariableRecord *> DPUsers) {
2245 // These are arbitrarily chosen limits on the maximum number of values and the
2246 // maximum size of a debug expression we can salvage up to, used for
2247 // performance reasons.
2248 const unsigned MaxDebugArgs = 16;
2249 const unsigned MaxExpressionSize = 128;
2250 bool Salvaged = false;
2251
2252 for (auto *DII : DbgUsers) {
2253 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(Val: DII)) {
2254 if (DAI->getAddress() == &I) {
2255 salvageDbgAssignAddress(Assign: DAI);
2256 Salvaged = true;
2257 }
2258 if (DAI->getValue() != &I)
2259 continue;
2260 }
2261
2262 // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly
2263 // pointing out the value as a DWARF memory location description.
2264 bool StackValue = isa<DbgValueInst>(Val: DII);
2265 auto DIILocation = DII->location_ops();
2266 assert(
2267 is_contained(DIILocation, &I) &&
2268 "DbgVariableIntrinsic must use salvaged instruction as its location");
2269 SmallVector<Value *, 4> AdditionalValues;
2270 // `I` may appear more than once in DII's location ops, and each use of `I`
2271 // must be updated in the DIExpression and potentially have additional
2272 // values added; thus we call salvageDebugInfoImpl for each `I` instance in
2273 // DIILocation.
2274 Value *Op0 = nullptr;
2275 DIExpression *SalvagedExpr = DII->getExpression();
2276 auto LocItr = find(Range&: DIILocation, Val: &I);
2277 while (SalvagedExpr && LocItr != DIILocation.end()) {
2278 SmallVector<uint64_t, 16> Ops;
2279 unsigned LocNo = std::distance(first: DIILocation.begin(), last: LocItr);
2280 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2281 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2282 if (!Op0)
2283 break;
2284 SalvagedExpr =
2285 DIExpression::appendOpsToArg(Expr: SalvagedExpr, Ops, ArgNo: LocNo, StackValue);
2286 LocItr = std::find(first: ++LocItr, last: DIILocation.end(), val: &I);
2287 }
2288 // salvageDebugInfoImpl should either fail on the first location operand it
2289 // examines or not fail at all.
2290 if (!Op0)
2291 break;
2292
2293 DII->replaceVariableLocationOp(OldValue: &I, NewValue: Op0);
2294 bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
2295 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2296 DII->setExpression(SalvagedExpr);
2297 } else if (isa<DbgValueInst>(Val: DII) && IsValidSalvageExpr &&
2298 DII->getNumVariableLocationOps() + AdditionalValues.size() <=
2299 MaxDebugArgs) {
2300 DII->addVariableLocationOps(NewValues: AdditionalValues, NewExpr: SalvagedExpr);
2301 } else {
2302 // Do not salvage using DIArgList for dbg.declare, as it is not currently
2303 // supported in those instructions. Also do not salvage if the resulting
2304 // DIArgList would contain an unreasonably large number of values.
2305 DII->setKillLocation();
2306 }
2307 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
2308 Salvaged = true;
2309 }
2310 // Duplicate of above block for DbgVariableRecords.
2311 for (auto *DVR : DPUsers) {
2312 if (DVR->isDbgAssign()) {
2313 if (DVR->getAddress() == &I) {
2314 salvageDbgAssignAddress(Assign: DVR);
2315 Salvaged = true;
2316 }
2317 if (DVR->getValue() != &I)
2318 continue;
2319 }
2320
2321 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2322 // are implicitly pointing out the value as a DWARF memory location
2323 // description.
2324 bool StackValue =
2325 DVR->getType() != DbgVariableRecord::LocationType::Declare;
2326 auto DVRLocation = DVR->location_ops();
2327 assert(
2328 is_contained(DVRLocation, &I) &&
2329 "DbgVariableIntrinsic must use salvaged instruction as its location");
2330 SmallVector<Value *, 4> AdditionalValues;
2331 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2332 // must be updated in the DIExpression and potentially have additional
2333 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2334 // DVRLocation.
2335 Value *Op0 = nullptr;
2336 DIExpression *SalvagedExpr = DVR->getExpression();
2337 auto LocItr = find(Range&: DVRLocation, Val: &I);
2338 while (SalvagedExpr && LocItr != DVRLocation.end()) {
2339 SmallVector<uint64_t, 16> Ops;
2340 unsigned LocNo = std::distance(first: DVRLocation.begin(), last: LocItr);
2341 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2342 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2343 if (!Op0)
2344 break;
2345 SalvagedExpr =
2346 DIExpression::appendOpsToArg(Expr: SalvagedExpr, Ops, ArgNo: LocNo, StackValue);
2347 LocItr = std::find(first: ++LocItr, last: DVRLocation.end(), val: &I);
2348 }
2349 // salvageDebugInfoImpl should either fail on the first location operand it
2350 // examines or not fail at all.
2351 if (!Op0)
2352 break;
2353
2354 DVR->replaceVariableLocationOp(OldValue: &I, NewValue: Op0);
2355 bool IsValidSalvageExpr =
2356 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2357 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2358 DVR->setExpression(SalvagedExpr);
2359 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2360 IsValidSalvageExpr &&
2361 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2362 MaxDebugArgs) {
2363 DVR->addVariableLocationOps(NewValues: AdditionalValues, NewExpr: SalvagedExpr);
2364 } else {
2365 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2366 // currently only valid for stack value expressions.
2367 // Also do not salvage if the resulting DIArgList would contain an
2368 // unreasonably large number of values.
2369 DVR->setKillLocation();
2370 }
2371 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DVR << '\n');
2372 Salvaged = true;
2373 }
2374
2375 if (Salvaged)
2376 return;
2377
2378 for (auto *DII : DbgUsers)
2379 DII->setKillLocation();
2380
2381 for (auto *DVR : DPUsers)
2382 DVR->setKillLocation();
2383}
2384
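/// Translate the offset computation of \p GEP into DIExpression operations,
/// appending them to \p Opcodes and recording variable indices in
/// \p AdditionalValues. Returns the base pointer, or nullptr on failure.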
2385Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
2386 uint64_t CurrentLocOps,
2387 SmallVectorImpl<uint64_t> &Opcodes,
2388 SmallVectorImpl<Value *> &AdditionalValues) {
2389 unsigned BitWidth = DL.getIndexSizeInBits(AS: GEP->getPointerAddressSpace());
2390 // Rewrite a GEP into a DIExpression.
2391 MapVector<Value *, APInt> VariableOffsets;
2392 APInt ConstantOffset(BitWidth, 0);
2393 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2394 return nullptr;
2395 if (!VariableOffsets.empty() && !CurrentLocOps) {
2396 Opcodes.insert(I: Opcodes.begin(), IL: {dwarf::DW_OP_LLVM_arg, 0});
2397 CurrentLocOps = 1;
2398 }
2399 for (const auto &Offset : VariableOffsets) {
2400 AdditionalValues.push_back(Elt: Offset.first);
2401 assert(Offset.second.isStrictlyPositive() &&
2402 "Expected strictly positive multiplier for offset.");
2403 Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2404 Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2405 dwarf::DW_OP_plus});
2406 }
2407 DIExpression::appendOffset(Ops&: Opcodes, Offset: ConstantOffset.getSExtValue());
2408 return GEP->getOperand(i_nocapture: 0);
2409}
2410
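/// Map an LLVM binary opcode to the equivalent DWARF expression operator, or
/// return 0 if there is no direct equivalent.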
2411uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2412 switch (Opcode) {
2413 case Instruction::Add:
2414 return dwarf::DW_OP_plus;
2415 case Instruction::Sub:
2416 return dwarf::DW_OP_minus;
2417 case Instruction::Mul:
2418 return dwarf::DW_OP_mul;
2419 case Instruction::SDiv:
2420 return dwarf::DW_OP_div;
2421 case Instruction::SRem:
2422 return dwarf::DW_OP_mod;
2423 case Instruction::Or:
2424 return dwarf::DW_OP_or;
2425 case Instruction::And:
2426 return dwarf::DW_OP_and;
2427 case Instruction::Xor:
2428 return dwarf::DW_OP_xor;
2429 case Instruction::Shl:
2430 return dwarf::DW_OP_shl;
2431 case Instruction::LShr:
2432 return dwarf::DW_OP_shr;
2433 case Instruction::AShr:
2434 return dwarf::DW_OP_shra;
2435 default:
2436 // TODO: Salvage from each kind of binop we know about.
2437 return 0;
2438 }
2439}
2440
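/// Append a DW_OP_LLVM_arg reference for the non-constant second operand of
/// \p I, recording that operand in \p AdditionalValues as a new location op.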
2441static void handleSSAValueOperands(uint64_t CurrentLocOps,
2442 SmallVectorImpl<uint64_t> &Opcodes,
2443 SmallVectorImpl<Value *> &AdditionalValues,
2444 Instruction *I) {
2445 if (!CurrentLocOps) {
2446 Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, 0});
2447 CurrentLocOps = 1;
2448 }
2449 Opcodes.append(IL: {dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2450 AdditionalValues.push_back(Elt: I->getOperand(i: 1));
2451}
2452
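/// Translate the binary operation \p BI into DIExpression operations, folding
/// a constant-integer RHS directly into the expression. Returns the first
/// operand of \p BI, or nullptr if the operation cannot be represented.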
2453Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
2454 SmallVectorImpl<uint64_t> &Opcodes,
2455 SmallVectorImpl<Value *> &AdditionalValues) {
2456 // Handle binary operations with constant integer operands as a special case.
2457 auto *ConstInt = dyn_cast<ConstantInt>(Val: BI->getOperand(i_nocapture: 1));
2458 // Values wider than 64 bits cannot be represented within a DIExpression.
2459 if (ConstInt && ConstInt->getBitWidth() > 64)
2460 return nullptr;
2461
2462 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2463 // Push any Constant Int operand onto the expression stack.
2464 if (ConstInt) {
2465 uint64_t Val = ConstInt->getSExtValue();
2466 // Add or Sub Instructions with a constant operand can potentially be
2467 // simplified.
2468 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2469 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2470 DIExpression::appendOffset(Ops&: Opcodes, Offset);
2471 return BI->getOperand(i_nocapture: 0);
2472 }
2473 Opcodes.append(IL: {dwarf::DW_OP_constu, Val});
2474 } else {
2475 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, I: BI);
2476 }
2477
2478 // Add salvaged binary operator to expression stack, if it has a valid
2479 // representation in a DIExpression.
2480 uint64_t DwarfBinOp = getDwarfOpForBinOp(Opcode: BinOpcode);
2481 if (!DwarfBinOp)
2482 return nullptr;
2483 Opcodes.push_back(Elt: DwarfBinOp);
2484 return BI->getOperand(i_nocapture: 0);
2485}
2486
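/// Map an integer comparison predicate to the equivalent DWARF comparison
/// operator, or return 0 if there is no equivalent.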
2487uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
2488 // The signedness of the operation is implicit in the typed stack; signed and
2489 // unsigned instructions map to the same DWARF opcode.
2490 switch (Pred) {
2491 case CmpInst::ICMP_EQ:
2492 return dwarf::DW_OP_eq;
2493 case CmpInst::ICMP_NE:
2494 return dwarf::DW_OP_ne;
2495 case CmpInst::ICMP_UGT:
2496 case CmpInst::ICMP_SGT:
2497 return dwarf::DW_OP_gt;
2498 case CmpInst::ICMP_UGE:
2499 case CmpInst::ICMP_SGE:
2500 return dwarf::DW_OP_ge;
2501 case CmpInst::ICMP_ULT:
2502 case CmpInst::ICMP_SLT:
2503 return dwarf::DW_OP_lt;
2504 case CmpInst::ICMP_ULE:
2505 case CmpInst::ICMP_SLE:
2506 return dwarf::DW_OP_le;
2507 default:
2508 return 0;
2509 }
2510}
2511
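/// Translate the integer comparison \p Icmp into DIExpression operations,
/// analogous to getSalvageOpsForBinOp. Returns the first operand of \p Icmp,
/// or nullptr if the predicate cannot be represented.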
2512Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
2513 SmallVectorImpl<uint64_t> &Opcodes,
2514 SmallVectorImpl<Value *> &AdditionalValues) {
2515 // Handle icmp operations with constant integer operands as a special case.
2516 auto *ConstInt = dyn_cast<ConstantInt>(Val: Icmp->getOperand(i_nocapture: 1));
2517 // Values wider than 64 bits cannot be represented within a DIExpression.
2518 if (ConstInt && ConstInt->getBitWidth() > 64)
2519 return nullptr;
2520 // Push any Constant Int operand onto the expression stack.
2521 if (ConstInt) {
2522 if (Icmp->isSigned())
2523 Opcodes.push_back(Elt: dwarf::DW_OP_consts);
2524 else
2525 Opcodes.push_back(Elt: dwarf::DW_OP_constu);
2526 uint64_t Val = ConstInt->getSExtValue();
2527 Opcodes.push_back(Elt: Val);
2528 } else {
2529 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, I: Icmp);
2530 }
2531
2532 // Add the salvaged icmp operator to the expression stack, if it has a valid
2533 // representation in a DIExpression.
2534 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Pred: Icmp->getPredicate());
2535 if (!DwarfIcmpOp)
2536 return nullptr;
2537 Opcodes.push_back(Elt: DwarfIcmpOp);
2538 return Icmp->getOperand(i_nocapture: 0);
2539}
2540
2541Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
2542 SmallVectorImpl<uint64_t> &Ops,
2543 SmallVectorImpl<Value *> &AdditionalValues) {
2544 auto &M = *I.getModule();
2545 auto &DL = M.getDataLayout();
2546
2547 if (auto *CI = dyn_cast<CastInst>(Val: &I)) {
2548 Value *FromValue = CI->getOperand(i_nocapture: 0);
2549 // No-op casts are irrelevant for debug info.
2550 if (CI->isNoopCast(DL)) {
2551 return FromValue;
2552 }
2553
2554 Type *Type = CI->getType();
2555 if (Type->isPointerTy())
2556 Type = DL.getIntPtrType(Type);
2557 // Only Trunc, SExt, ZExt, PtrToInt, and IntToPtr casts to scalar types can be salvaged.
2558 if (Type->isVectorTy() ||
2559 !(isa<TruncInst>(Val: &I) || isa<SExtInst>(Val: &I) || isa<ZExtInst>(Val: &I) ||
2560 isa<IntToPtrInst>(Val: &I) || isa<PtrToIntInst>(Val: &I)))
2561 return nullptr;
2562
2563 llvm::Type *FromType = FromValue->getType();
2564 if (FromType->isPointerTy())
2565 FromType = DL.getIntPtrType(FromType);
2566
2567 unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2568 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2569
2570 auto ExtOps = DIExpression::getExtOps(FromSize: FromTypeBitSize, ToSize: ToTypeBitSize,
2571 Signed: isa<SExtInst>(Val: &I));
2572 Ops.append(in_start: ExtOps.begin(), in_end: ExtOps.end());
2573 return FromValue;
2574 }
2575
2576 if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: &I))
2577 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Opcodes&: Ops, AdditionalValues);
2578 if (auto *BI = dyn_cast<BinaryOperator>(Val: &I))
2579 return getSalvageOpsForBinOp(BI, CurrentLocOps, Opcodes&: Ops, AdditionalValues);
2580 if (auto *IC = dyn_cast<ICmpInst>(Val: &I))
2581 return getSalvageOpsForIcmpOp(Icmp: IC, CurrentLocOps, Opcodes&: Ops, AdditionalValues);
2582
2583 // *Not* to do: we should not attempt to salvage load instructions,
2584 // because the validity and lifetime of a dbg.value containing
2585 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2586 return nullptr;
2587}
2588
2589/// A replacement for a dbg.value expression.
2590using DbgValReplacement = std::optional<DIExpression *>;
2591
2592/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2593/// possibly moving/undefing users to prevent use-before-def. Returns true if
2594/// changes are made.
2595static bool rewriteDebugUsers(
2596 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2597 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr,
2598 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2599 // Find debug users of From.
2600 SmallVector<DbgVariableIntrinsic *, 1> Users;
2601 SmallVector<DbgVariableRecord *, 1> DPUsers;
2602 findDbgUsers(DbgInsts&: Users, V: &From, DbgVariableRecords: &DPUsers);
2603 if (Users.empty() && DPUsers.empty())
2604 return false;
2605
2606 // Prevent use-before-def of To.
2607 bool Changed = false;
2608
2609 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
2610 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2611 if (isa<Instruction>(Val: &To)) {
2612 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
2613
2614 for (auto *DII : Users) {
2615 // It's common to see a debug user between From and DomPoint. Move it
2616 // after DomPoint to preserve the variable update without any reordering.
2617 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
2618 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
2619 DII->moveAfter(MovePos: &DomPoint);
2620 Changed = true;
2621
2622 // Users which otherwise aren't dominated by the replacement value must
2623 // be salvaged or deleted.
2624 } else if (!DT.dominates(Def: &DomPoint, User: DII)) {
2625 UndefOrSalvage.insert(Ptr: DII);
2626 }
2627 }
2628
2629 // DbgVariableRecord implementation of the above.
2630 for (auto *DVR : DPUsers) {
2631 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2632 Instruction *NextNonDebug = MarkedInstr;
2633 // The next instruction might still be a dbg.declare, skip over it.
2634 if (isa<DbgVariableIntrinsic>(Val: NextNonDebug))
2635 NextNonDebug = NextNonDebug->getNextNonDebugInstruction();
2636
2637 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2638 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2639 DVR->removeFromParent();
2640 // Ensure there's a marker.
2641 DomPoint.getParent()->insertDbgRecordAfter(DR: DVR, I: &DomPoint);
2642 Changed = true;
2643 } else if (!DT.dominates(Def: &DomPoint, User: MarkedInstr)) {
2644 UndefOrSalvageDVR.insert(Ptr: DVR);
2645 }
2646 }
2647 }
2648
2649 // Update debug users without use-before-def risk.
2650 for (auto *DII : Users) {
2651 if (UndefOrSalvage.count(Ptr: DII))
2652 continue;
2653
2654 DbgValReplacement DVRepl = RewriteExpr(*DII);
2655 if (!DVRepl)
2656 continue;
2657
2658 DII->replaceVariableLocationOp(OldValue: &From, NewValue: &To);
2659 DII->setExpression(*DVRepl);
2660 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
2661 Changed = true;
2662 }
2663 for (auto *DVR : DPUsers) {
2664 if (UndefOrSalvageDVR.count(Ptr: DVR))
2665 continue;
2666
2667 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2668 if (!DVRepl)
2669 continue;
2670
2671 DVR->replaceVariableLocationOp(OldValue: &From, NewValue: &To);
2672 DVR->setExpression(*DVRepl);
2673 LLVM_DEBUG(dbgs() << "REWRITE: " << *DVR << '\n');
2674 Changed = true;
2675 }
2676
2677 if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) {
2678 // Try to salvage the remaining debug users.
2679 salvageDebugInfo(I&: From);
2680 Changed = true;
2681 }
2682
2683 return Changed;
2684}
2685
2686/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2687/// losslessly preserve the bits and semantics of the value. This predicate is
2688/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
2689///
2690/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2691/// allows semantically non-equivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2692/// and also does not allow lossless pointer <-> integer conversions.
2693static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2694 Type *ToTy) {
2695 // Trivially compatible types.
2696 if (FromTy == ToTy)
2697 return true;
2698
2699 // Handle compatible pointer <-> integer conversions.
2700 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2701 bool SameSize = DL.getTypeSizeInBits(Ty: FromTy) == DL.getTypeSizeInBits(Ty: ToTy);
2702 bool LosslessConversion = !DL.isNonIntegralPointerType(Ty: FromTy) &&
2703 !DL.isNonIntegralPointerType(Ty: ToTy);
2704 return SameSize && LosslessConversion;
2705 }
2706
2707 // TODO: This is not exhaustive.
2708 return false;
2709}
2710
2711bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2712 Instruction &DomPoint, DominatorTree &DT) {
2713 // Exit early if From has no debug users.
2714 if (!From.isUsedByMetadata())
2715 return false;
2716
2717 assert(&From != &To && "Can't replace something with itself");
2718
2719 Type *FromTy = From.getType();
2720 Type *ToTy = To.getType();
2721
2722 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2723 return DII.getExpression();
2724 };
2725 auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2726 return DVR.getExpression();
2727 };
2728
2729 // Handle no-op conversions.
2730 Module &M = *From.getModule();
2731 const DataLayout &DL = M.getDataLayout();
2732 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2733 return rewriteDebugUsers(From, To, DomPoint, DT, RewriteExpr: Identity, RewriteDVRExpr: IdentityDVR);
2734
2735 // Handle integer-to-integer widening and narrowing.
2736 // FIXME: Use DW_OP_convert when it's available everywhere.
2737 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2738 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2739 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2740 assert(FromBits != ToBits && "Unexpected no-op conversion");
2741
2742 // When the width of the result grows, assume that a debugger will only
2743 // access the low `FromBits` bits when inspecting the source variable.
2744 if (FromBits < ToBits)
2745 return rewriteDebugUsers(From, To, DomPoint, DT, RewriteExpr: Identity, RewriteDVRExpr: IdentityDVR);
2746
2747 // The width of the result has shrunk. Use sign/zero extension to describe
2748 // the source variable's high bits.
2749 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2750 DILocalVariable *Var = DII.getVariable();
2751
2752 // Without knowing signedness, sign/zero extension isn't possible.
2753 auto Signedness = Var->getSignedness();
2754 if (!Signedness)
2755 return std::nullopt;
2756
2757 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2758 return DIExpression::appendExt(Expr: DII.getExpression(), FromSize: ToBits, ToSize: FromBits,
2759 Signed);
2760 };
2761 // RemoveDIs: duplicate implementation working on DbgVariableRecords rather
2762 // than on dbg.value intrinsics.
2763 auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2764 DILocalVariable *Var = DVR.getVariable();
2765
2766 // Without knowing signedness, sign/zero extension isn't possible.
2767 auto Signedness = Var->getSignedness();
2768 if (!Signedness)
2769 return std::nullopt;
2770
2771 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2772 return DIExpression::appendExt(Expr: DVR.getExpression(), FromSize: ToBits, ToSize: FromBits,
2773 Signed);
2774 };
2775 return rewriteDebugUsers(From, To, DomPoint, DT, RewriteExpr: SignOrZeroExt,
2776 RewriteDVRExpr: SignOrZeroExtDVR);
2777 }
2778
2779 // TODO: Floating-point conversions, vectors.
2780 return false;
2781}
2782
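/// Replace all instruction operands of the unreachable terminator \p I with
/// poison, recording the replaced values in \p PoisonedValues. Returns true
/// if any operand was changed.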
2783bool llvm::handleUnreachableTerminator(
2784 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2785 bool Changed = false;
2786 // RemoveDIs: erase debug-info on this instruction manually.
2787 I->dropDbgRecords();
2788 for (Use &U : I->operands()) {
2789 Value *Op = U.get();
2790 if (isa<Instruction>(Val: Op) && !Op->getType()->isTokenTy()) {
2791 U.set(PoisonValue::get(T: Op->getType()));
2792 PoisonedValues.push_back(Elt: Op);
2793 Changed = true;
2794 }
2795 }
2796
2797 return Changed;
2798}
2799
2800std::pair<unsigned, unsigned>
2801llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2802 unsigned NumDeadInst = 0;
2803 unsigned NumDeadDbgInst = 0;
2804 // Delete the instructions backwards; this reduces how many def-use and
2805 // use-def chains have to be updated.
2806 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2807 SmallVector<Value *> Uses;
2808 handleUnreachableTerminator(I: EndInst, PoisonedValues&: Uses);
2809
2810 while (EndInst != &BB->front()) {
2811 // Delete the next to last instruction.
2812 Instruction *Inst = &*--EndInst->getIterator();
2813 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2814 Inst->replaceAllUsesWith(V: PoisonValue::get(T: Inst->getType()));
2815 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2816 // EHPads can't have DbgVariableRecords attached to them, but it might be
2817 // possible for things with token type.
2818 Inst->dropDbgRecords();
2819 EndInst = Inst;
2820 continue;
2821 }
2822 if (isa<DbgInfoIntrinsic>(Val: Inst))
2823 ++NumDeadDbgInst;
2824 else
2825 ++NumDeadInst;
2826 // RemoveDIs: erasing debug-info must be done manually.
2827 Inst->dropDbgRecords();
2828 Inst->eraseFromParent();
2829 }
2830 return {NumDeadInst, NumDeadDbgInst};
2831}
2832
2833unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2834 DomTreeUpdater *DTU,
2835 MemorySSAUpdater *MSSAU) {
2836 BasicBlock *BB = I->getParent();
2837
2838 if (MSSAU)
2839 MSSAU->changeToUnreachable(I);
2840
2841 SmallSet<BasicBlock *, 8> UniqueSuccessors;
2842
2843 // Loop over all of the successors, removing BB's entry from any PHI
2844 // nodes.
2845 for (BasicBlock *Successor : successors(BB)) {
2846 Successor->removePredecessor(Pred: BB, KeepOneInputPHIs: PreserveLCSSA);
2847 if (DTU)
2848 UniqueSuccessors.insert(Ptr: Successor);
2849 }
2850 auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2851 UI->setDebugLoc(I->getDebugLoc());
2852
2853 // All instructions after this are dead.
2854 unsigned NumInstrsRemoved = 0;
2855 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2856 while (BBI != BBE) {
2857 if (!BBI->use_empty())
2858 BBI->replaceAllUsesWith(V: PoisonValue::get(T: BBI->getType()));
2859 BBI++->eraseFromParent();
2860 ++NumInstrsRemoved;
2861 }
2862 if (DTU) {
2863 SmallVector<DominatorTree::UpdateType, 8> Updates;
2864 Updates.reserve(N: UniqueSuccessors.size());
2865 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2866 Updates.push_back(Elt: {DominatorTree::Delete, BB, UniqueSuccessor});
2867 DTU->applyUpdates(Updates);
2868 }
2869 BB->flushTerminatorDbgRecords();
2870 return NumInstrsRemoved;
2871}
2872
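/// Create (without inserting) a call that mirrors \p II: same callee,
/// arguments, operand bundles, attributes, metadata and debug location, with
/// the invoke's branch-weight metadata converted where possible.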
2873CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2874 SmallVector<Value *, 8> Args(II->args());
2875 SmallVector<OperandBundleDef, 1> OpBundles;
2876 II->getOperandBundlesAsDefs(Defs&: OpBundles);
2877 CallInst *NewCall = CallInst::Create(Ty: II->getFunctionType(),
2878 Func: II->getCalledOperand(), Args, Bundles: OpBundles);
2879 NewCall->setCallingConv(II->getCallingConv());
2880 NewCall->setAttributes(II->getAttributes());
2881 NewCall->setDebugLoc(II->getDebugLoc());
2882 NewCall->copyMetadata(SrcInst: *II);
2883
2884 // If the invoke had profile metadata, try converting them for CallInst.
2885 uint64_t TotalWeight;
2886 if (NewCall->extractProfTotalWeight(TotalVal&: TotalWeight)) {
2887 // Set the total weight if it fits into i32, otherwise reset.
2888 MDBuilder MDB(NewCall->getContext());
2889 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2890 ? nullptr
2891 : MDB.createBranchWeights(Weights: {uint32_t(TotalWeight)});
2892 NewCall->setMetadata(KindID: LLVMContext::MD_prof, Node: NewWeights);
2893 }
2894
2895 return NewCall;
2896}
2897
2898// changeToCall - Convert the specified invoke into a normal call.
2899CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2900 CallInst *NewCall = createCallMatchingInvoke(II);
2901 NewCall->takeName(V: II);
2902 NewCall->insertBefore(InsertPos: II);
2903 II->replaceAllUsesWith(V: NewCall);
2904
2905 // Follow the call by a branch to the normal destination.
2906 BasicBlock *NormalDestBB = II->getNormalDest();
2907 BranchInst::Create(IfTrue: NormalDestBB, InsertBefore: II->getIterator());
2908
2909 // Update PHI nodes in the unwind destination
2910 BasicBlock *BB = II->getParent();
2911 BasicBlock *UnwindDestBB = II->getUnwindDest();
2912 UnwindDestBB->removePredecessor(Pred: BB);
2913 II->eraseFromParent();
2914 if (DTU)
2915 DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDestBB}});
2916 return NewCall;
2917}
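// Example of the rewrite performed here (illustrative only):
//
//   %r = invoke i32 @f(i32 %x) to label %normal unwind label %lpad
//
// becomes
//
//   %r = call i32 @f(i32 %x)
//   br label %normal
//
// with %lpad losing this block as a predecessor and, if a DomTreeUpdater is
// supplied, the corresponding dominator-tree edge being removed.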
2918
2919BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2920 BasicBlock *UnwindEdge,
2921 DomTreeUpdater *DTU) {
2922 BasicBlock *BB = CI->getParent();
2923
2924 // Convert this function call into an invoke instruction. First, split the
2925 // basic block.
2926 BasicBlock *Split = SplitBlock(Old: BB, SplitPt: CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2927 BBName: CI->getName() + ".noexc");
2928
2929 // Delete the unconditional branch inserted by SplitBlock
2930 BB->back().eraseFromParent();
2931
2932 // Create the new invoke instruction.
2933 SmallVector<Value *, 8> InvokeArgs(CI->args());
2934 SmallVector<OperandBundleDef, 1> OpBundles;
2935
2936 CI->getOperandBundlesAsDefs(Defs&: OpBundles);
2937
2938 // Note: we're round tripping operand bundles through memory here, and that
2939 // can potentially be avoided with a cleverer API design that we do not have
2940 // as of this time.
2941
2942 InvokeInst *II =
2943 InvokeInst::Create(Ty: CI->getFunctionType(), Func: CI->getCalledOperand(), IfNormal: Split,
2944 IfException: UnwindEdge, Args: InvokeArgs, Bundles: OpBundles, NameStr: CI->getName(), InsertAtEnd: BB);
2945 II->setDebugLoc(CI->getDebugLoc());
2946 II->setCallingConv(CI->getCallingConv());
2947 II->setAttributes(CI->getAttributes());
2948 II->setMetadata(KindID: LLVMContext::MD_prof, Node: CI->getMetadata(KindID: LLVMContext::MD_prof));
2949
2950 if (DTU)
2951 DTU->applyUpdates(Updates: {{DominatorTree::Insert, BB, UnwindEdge}});
2952
2953 // Make sure that anything using the call now uses the invoke! This also
2954 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2955 CI->replaceAllUsesWith(V: II);
2956
2957 // Delete the original call
2958 Split->front().eraseFromParent();
2959 return Split;
2960}
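// Sketch of the inverse transformation (illustrative): a call
//
//   bb:
//     %r = call i32 @f(i32 %x)
//     ; ...rest of bb...
//
// is split so that bb now ends in
//
//   %r = invoke i32 @f(i32 %x) to label %r.noexc unwind label %unwind.dest
//
// where %r.noexc holds the rest of the original block and %unwind.dest is the
// UnwindEdge block supplied by the caller.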
2961
2962static bool markAliveBlocks(Function &F,
2963 SmallPtrSetImpl<BasicBlock *> &Reachable,
2964 DomTreeUpdater *DTU = nullptr) {
2965 SmallVector<BasicBlock*, 128> Worklist;
2966 BasicBlock *BB = &F.front();
2967 Worklist.push_back(Elt: BB);
2968 Reachable.insert(Ptr: BB);
2969 bool Changed = false;
2970 do {
2971 BB = Worklist.pop_back_val();
2972
2973 // Do a quick scan of the basic block, turning any obviously unreachable
2974 // instructions into LLVM unreachable insts. The instruction combining pass
2975 // canonicalizes unreachable insts into stores to null or undef.
2976 for (Instruction &I : *BB) {
2977 if (auto *CI = dyn_cast<CallInst>(Val: &I)) {
2978 Value *Callee = CI->getCalledOperand();
2979 // Handle intrinsic calls.
2980 if (Function *F = dyn_cast<Function>(Val: Callee)) {
2981 auto IntrinsicID = F->getIntrinsicID();
2982 // Assumptions that are known to be false are equivalent to
2983 // unreachable. Also, if the condition is undefined, then we make the
2984 // choice most beneficial to the optimizer, and choose that to also be
2985 // unreachable.
2986 if (IntrinsicID == Intrinsic::assume) {
2987 if (match(V: CI->getArgOperand(i: 0), P: m_CombineOr(L: m_Zero(), R: m_Undef()))) {
2988 // Don't insert a call to llvm.trap right before the unreachable.
2989 changeToUnreachable(I: CI, PreserveLCSSA: false, DTU);
2990 Changed = true;
2991 break;
2992 }
2993 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2994 // A call to the guard intrinsic bails out of the current
2995 // compilation unit if the predicate passed to it is false. If the
2996 // predicate is a constant false, then we know the guard will bail
2997 // out of the current compile unconditionally, so all code following
2998 // it is dead.
2999 //
3000 // Note: unlike in llvm.assume, it is not "obviously profitable" for
3001 // guards to treat `undef` as `false` since a guard on `undef` can
3002 // still be useful for widening.
3003 if (match(V: CI->getArgOperand(i: 0), P: m_Zero()))
3004 if (!isa<UnreachableInst>(Val: CI->getNextNode())) {
3005 changeToUnreachable(I: CI->getNextNode(), PreserveLCSSA: false, DTU);
3006 Changed = true;
3007 break;
3008 }
3009 }
3010 } else if ((isa<ConstantPointerNull>(Val: Callee) &&
3011 !NullPointerIsDefined(F: CI->getFunction(),
3012 AS: cast<PointerType>(Val: Callee->getType())
3013 ->getAddressSpace())) ||
3014 isa<UndefValue>(Val: Callee)) {
3015 changeToUnreachable(I: CI, PreserveLCSSA: false, DTU);
3016 Changed = true;
3017 break;
3018 }
3019 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
3020 // If we found a call to a no-return function, insert an unreachable
3021 // instruction after it. Make sure there isn't *already* one there
3022 // though.
3023 if (!isa<UnreachableInst>(Val: CI->getNextNonDebugInstruction())) {
3024 // Don't insert a call to llvm.trap right before the unreachable.
3025 changeToUnreachable(I: CI->getNextNonDebugInstruction(), PreserveLCSSA: false, DTU);
3026 Changed = true;
3027 }
3028 break;
3029 }
3030 } else if (auto *SI = dyn_cast<StoreInst>(Val: &I)) {
3031 // Store to undef and store to null are undefined and used to signal
3032 // that they should be changed to unreachable by passes that can't
3033 // modify the CFG.
3034
3035 // Don't touch volatile stores.
3036 if (SI->isVolatile()) continue;
3037
3038 Value *Ptr = SI->getOperand(i_nocapture: 1);
3039
3040 if (isa<UndefValue>(Val: Ptr) ||
3041 (isa<ConstantPointerNull>(Val: Ptr) &&
3042 !NullPointerIsDefined(F: SI->getFunction(),
3043 AS: SI->getPointerAddressSpace()))) {
3044 changeToUnreachable(I: SI, PreserveLCSSA: false, DTU);
3045 Changed = true;
3046 break;
3047 }
3048 }
3049 }
3050
3051 Instruction *Terminator = BB->getTerminator();
3052 if (auto *II = dyn_cast<InvokeInst>(Val: Terminator)) {
3053 // Turn invokes that call 'nounwind' functions into ordinary calls.
3054 Value *Callee = II->getCalledOperand();
3055 if ((isa<ConstantPointerNull>(Val: Callee) &&
3056 !NullPointerIsDefined(F: BB->getParent())) ||
3057 isa<UndefValue>(Val: Callee)) {
3058 changeToUnreachable(I: II, PreserveLCSSA: false, DTU);
3059 Changed = true;
3060 } else {
3061 if (II->doesNotReturn() &&
3062 !isa<UnreachableInst>(Val: II->getNormalDest()->front())) {
3063 // If we found an invoke of a no-return function,
3064 // create a new empty basic block with an `unreachable` terminator,
3065 // and set it as the normal destination for the invoke,
3066 // unless that is already the case.
3067 // Note that the original normal destination could have other uses.
3068 BasicBlock *OrigNormalDest = II->getNormalDest();
3069 OrigNormalDest->removePredecessor(Pred: II->getParent());
3070 LLVMContext &Ctx = II->getContext();
3071 BasicBlock *UnreachableNormalDest = BasicBlock::Create(
3072 Context&: Ctx, Name: OrigNormalDest->getName() + ".unreachable",
3073 Parent: II->getFunction(), InsertBefore: OrigNormalDest);
3074 new UnreachableInst(Ctx, UnreachableNormalDest);
3075 II->setNormalDest(UnreachableNormalDest);
3076 if (DTU)
3077 DTU->applyUpdates(
3078 Updates: {{DominatorTree::Delete, BB, OrigNormalDest},
3079 {DominatorTree::Insert, BB, UnreachableNormalDest}});
3080 Changed = true;
3081 }
3082 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(F: &F)) {
3083 if (II->use_empty() && !II->mayHaveSideEffects()) {
3084 // jump to the normal destination branch.
3085 BasicBlock *NormalDestBB = II->getNormalDest();
3086 BasicBlock *UnwindDestBB = II->getUnwindDest();
3087 BranchInst::Create(IfTrue: NormalDestBB, InsertBefore: II->getIterator());
3088 UnwindDestBB->removePredecessor(Pred: II->getParent());
3089 II->eraseFromParent();
3090 if (DTU)
3091 DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDestBB}});
3092 } else
3093 changeToCall(II, DTU);
3094 Changed = true;
3095 }
3096 }
3097 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val: Terminator)) {
3098 // Remove catchpads which cannot be reached.
3099 struct CatchPadDenseMapInfo {
3100 static CatchPadInst *getEmptyKey() {
3101 return DenseMapInfo<CatchPadInst *>::getEmptyKey();
3102 }
3103
3104 static CatchPadInst *getTombstoneKey() {
3105 return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
3106 }
3107
3108 static unsigned getHashValue(CatchPadInst *CatchPad) {
3109 return static_cast<unsigned>(hash_combine_range(
3110 first: CatchPad->value_op_begin(), last: CatchPad->value_op_end()));
3111 }
3112
3113 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
3114 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
3115 RHS == getEmptyKey() || RHS == getTombstoneKey())
3116 return LHS == RHS;
3117 return LHS->isIdenticalTo(I: RHS);
3118 }
3119 };
3120
3121 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
3122 // Set of unique CatchPads.
3123 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
3124 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
3125 HandlerSet;
3126 detail::DenseSetEmpty Empty;
3127 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
3128 E = CatchSwitch->handler_end();
3129 I != E; ++I) {
3130 BasicBlock *HandlerBB = *I;
3131 if (DTU)
3132 ++NumPerSuccessorCases[HandlerBB];
3133 auto *CatchPad = cast<CatchPadInst>(Val: HandlerBB->getFirstNonPHI());
3134 if (!HandlerSet.insert(KV: {CatchPad, Empty}).second) {
3135 if (DTU)
3136 --NumPerSuccessorCases[HandlerBB];
3137 CatchSwitch->removeHandler(HI: I);
3138 --I;
3139 --E;
3140 Changed = true;
3141 }
3142 }
3143 if (DTU) {
3144 std::vector<DominatorTree::UpdateType> Updates;
3145 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
3146 if (I.second == 0)
3147 Updates.push_back(x: {DominatorTree::Delete, BB, I.first});
3148 DTU->applyUpdates(Updates);
3149 }
3150 }
3151
3152 Changed |= ConstantFoldTerminator(BB, DeleteDeadConditions: true, TLI: nullptr, DTU);
3153 for (BasicBlock *Successor : successors(BB))
3154 if (Reachable.insert(Ptr: Successor).second)
3155 Worklist.push_back(Elt: Successor);
3156 } while (!Worklist.empty());
3157 return Changed;
3158}
3159
3160Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
3161 Instruction *TI = BB->getTerminator();
3162
3163 if (auto *II = dyn_cast<InvokeInst>(Val: TI))
3164 return changeToCall(II, DTU);
3165
3166 Instruction *NewTI;
3167 BasicBlock *UnwindDest;
3168
3169 if (auto *CRI = dyn_cast<CleanupReturnInst>(Val: TI)) {
3170 NewTI = CleanupReturnInst::Create(CleanupPad: CRI->getCleanupPad(), UnwindBB: nullptr, InsertBefore: CRI->getIterator());
3171 UnwindDest = CRI->getUnwindDest();
3172 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Val: TI)) {
3173 auto *NewCatchSwitch = CatchSwitchInst::Create(
3174 ParentPad: CatchSwitch->getParentPad(), UnwindDest: nullptr, NumHandlers: CatchSwitch->getNumHandlers(),
3175 NameStr: CatchSwitch->getName(), InsertBefore: CatchSwitch->getIterator());
3176 for (BasicBlock *PadBB : CatchSwitch->handlers())
3177 NewCatchSwitch->addHandler(Dest: PadBB);
3178
3179 NewTI = NewCatchSwitch;
3180 UnwindDest = CatchSwitch->getUnwindDest();
3181 } else {
3182 llvm_unreachable("Could not find unwind successor");
3183 }
3184
3185 NewTI->takeName(V: TI);
3186 NewTI->setDebugLoc(TI->getDebugLoc());
3187 UnwindDest->removePredecessor(Pred: BB);
3188 TI->replaceAllUsesWith(V: NewTI);
3189 TI->eraseFromParent();
3190 if (DTU)
3191 DTU->applyUpdates(Updates: {{DominatorTree::Delete, BB, UnwindDest}});
3192 return NewTI;
3193}
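// Illustrative effect (assumption): a terminator such as
//
//   cleanupret from %cp unwind label %ehcleanup
//
// is rewritten to
//
//   cleanupret from %cp unwind to caller
//
// while an invoke is lowered to a plain call via changeToCall and a
// catchswitch is rebuilt with the same handlers but no unwind destination.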
3194
3195/// removeUnreachableBlocks - Remove blocks that are not reachable, even
3196/// if they are in a dead cycle. Return true if a change was made, false
3197/// otherwise.
3198bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
3199 MemorySSAUpdater *MSSAU) {
3200 SmallPtrSet<BasicBlock *, 16> Reachable;
3201 bool Changed = markAliveBlocks(F, Reachable, DTU);
3202
3203 // If there are unreachable blocks in the CFG...
3204 if (Reachable.size() == F.size())
3205 return Changed;
3206
3207 assert(Reachable.size() < F.size());
3208
3209 // Are there any blocks left to actually delete?
3210 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
3211 for (BasicBlock &BB : F) {
3212 // Skip reachable basic blocks
3213 if (Reachable.count(Ptr: &BB))
3214 continue;
3215 // Skip already-deleted blocks
3216 if (DTU && DTU->isBBPendingDeletion(DelBB: &BB))
3217 continue;
3218 BlocksToRemove.insert(X: &BB);
3219 }
3220
3221 if (BlocksToRemove.empty())
3222 return Changed;
3223
3224 Changed = true;
3225 NumRemoved += BlocksToRemove.size();
3226
3227 if (MSSAU)
3228 MSSAU->removeBlocks(DeadBlocks: BlocksToRemove);
3229
3230 DeleteDeadBlocks(BBs: BlocksToRemove.takeVector(), DTU);
3231
3232 return Changed;
3233}
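// Minimal usage sketch (hypothetical caller, not from this file):
//
//   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
//   bool Changed = removeUnreachableBlocks(F, &DTU);
//
// Both updater arguments are optional; passing nullptr simply skips the
// corresponding analysis update.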
3234
3235void llvm::combineMetadata(Instruction *K, const Instruction *J,
3236 ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
3237 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
3238 K->dropUnknownNonDebugMetadata(KnownIDs);
3239 K->getAllMetadataOtherThanDebugLoc(MDs&: Metadata);
3240 for (const auto &MD : Metadata) {
3241 unsigned Kind = MD.first;
3242 MDNode *JMD = J->getMetadata(KindID: Kind);
3243 MDNode *KMD = MD.second;
3244
3245 switch (Kind) {
3246 default:
3247 K->setMetadata(KindID: Kind, Node: nullptr); // Remove unknown metadata
3248 break;
3249 case LLVMContext::MD_dbg:
3250 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
3251 case LLVMContext::MD_DIAssignID:
3252 K->mergeDIAssignID(SourceInstructions: J);
3253 break;
3254 case LLVMContext::MD_tbaa:
3255 K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericTBAA(A: JMD, B: KMD));
3256 break;
3257 case LLVMContext::MD_alias_scope:
3258 K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericAliasScope(A: JMD, B: KMD));
3259 break;
3260 case LLVMContext::MD_noalias:
3261 case LLVMContext::MD_mem_parallel_loop_access:
3262 K->setMetadata(KindID: Kind, Node: MDNode::intersect(A: JMD, B: KMD));
3263 break;
3264 case LLVMContext::MD_access_group:
3265 K->setMetadata(KindID: LLVMContext::MD_access_group,
3266 Node: intersectAccessGroups(Inst1: K, Inst2: J));
3267 break;
3268 case LLVMContext::MD_range:
3269 if (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef))
3270 K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericRange(A: JMD, B: KMD));
3271 break;
3272 case LLVMContext::MD_fpmath:
3273 K->setMetadata(KindID: Kind, Node: MDNode::getMostGenericFPMath(A: JMD, B: KMD));
3274 break;
3275 case LLVMContext::MD_invariant_load:
3276 // If K moves, only set the !invariant.load if it is present in both
3277 // instructions.
3278 if (DoesKMove)
3279 K->setMetadata(KindID: Kind, Node: JMD);
3280 break;
3281 case LLVMContext::MD_nonnull:
3282 if (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef))
3283 K->setMetadata(KindID: Kind, Node: JMD);
3284 break;
3285 case LLVMContext::MD_invariant_group:
3286 // Preserve !invariant.group in K.
3287 break;
3288 case LLVMContext::MD_mmra:
3289 // Combine MMRAs
3290 break;
3291 case LLVMContext::MD_align:
3292 if (DoesKMove || !K->hasMetadata(KindID: LLVMContext::MD_noundef))
3293 K->setMetadata(
3294 KindID: Kind, Node: MDNode::getMostGenericAlignmentOrDereferenceable(A: JMD, B: KMD));
3295 break;
3296 case LLVMContext::MD_dereferenceable:
3297 case LLVMContext::MD_dereferenceable_or_null:
3298 if (DoesKMove)
3299 K->setMetadata(KindID: Kind,
3300 Node: MDNode::getMostGenericAlignmentOrDereferenceable(A: JMD, B: KMD));
3301 break;
3302 case LLVMContext::MD_preserve_access_index:
3303 // Preserve !preserve.access.index in K.
3304 break;
3305 case LLVMContext::MD_noundef:
3306 // If K does move, keep noundef if it is present in both instructions.
3307 if (DoesKMove)
3308 K->setMetadata(KindID: Kind, Node: JMD);
3309 break;
3310 case LLVMContext::MD_nontemporal:
3311 // Preserve !nontemporal if it is present on both instructions.
3312 K->setMetadata(KindID: Kind, Node: JMD);
3313 break;
3314 case LLVMContext::MD_prof:
3315 if (DoesKMove)
3316 K->setMetadata(KindID: Kind, Node: MDNode::getMergedProfMetadata(A: KMD, B: JMD, AInstr: K, BInstr: J));
3317 break;
3318 }
3319 }
3320 // Set !invariant.group from J if J has it. If both instructions have it
3321 // then we will just pick it from J - even when they are different.
3322 // Also make sure that K is load or store - f.e. combining bitcast with load
3323 // could produce bitcast with invariant.group metadata, which is invalid.
3324 // FIXME: we should try to preserve both invariant.group md if they are
3325 // different, but right now instruction can only have one invariant.group.
3326 if (auto *JMD = J->getMetadata(KindID: LLVMContext::MD_invariant_group))
3327 if (isa<LoadInst>(Val: K) || isa<StoreInst>(Val: K))
3328 K->setMetadata(KindID: LLVMContext::MD_invariant_group, Node: JMD);
3329
3330 // Merge MMRAs.
3331 // This is handled separately because we also want to handle cases where K
3332 // doesn't have tags but J does.
3333 auto JMMRA = J->getMetadata(KindID: LLVMContext::MD_mmra);
3334 auto KMMRA = K->getMetadata(KindID: LLVMContext::MD_mmra);
3335 if (JMMRA || KMMRA) {
3336 K->setMetadata(KindID: LLVMContext::MD_mmra,
3337 Node: MMRAMetadata::combine(Ctx&: K->getContext(), A: JMMRA, B: KMMRA));
3338 }
3339}
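// Worked example (illustrative): if K carries !range !{i32 0, i32 10} and J
// carries !range !{i32 5, i32 20}, and DoesKMove is true, K ends up with the
// most generic range covering both ([0, 20)); metadata kinds not listed in
// KnownIDs are dropped from K entirely. Callers normally go through
// combineMetadataForCSE below rather than assembling KnownIDs by hand.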
3340
3341void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
3342 bool KDominatesJ) {
3343 unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
3344 LLVMContext::MD_alias_scope,
3345 LLVMContext::MD_noalias,
3346 LLVMContext::MD_range,
3347 LLVMContext::MD_fpmath,
3348 LLVMContext::MD_invariant_load,
3349 LLVMContext::MD_nonnull,
3350 LLVMContext::MD_invariant_group,
3351 LLVMContext::MD_align,
3352 LLVMContext::MD_dereferenceable,
3353 LLVMContext::MD_dereferenceable_or_null,
3354 LLVMContext::MD_access_group,
3355 LLVMContext::MD_preserve_access_index,
3356 LLVMContext::MD_prof,
3357 LLVMContext::MD_nontemporal,
3358 LLVMContext::MD_noundef,
3359 LLVMContext::MD_mmra};
3360 combineMetadata(K, J, KnownIDs, DoesKMove: KDominatesJ);
3361}
3362
3363void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
3364 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
3365 Source.getAllMetadata(MDs&: MD);
3366 MDBuilder MDB(Dest.getContext());
3367 Type *NewType = Dest.getType();
3368 const DataLayout &DL = Source.getModule()->getDataLayout();
3369 for (const auto &MDPair : MD) {
3370 unsigned ID = MDPair.first;
3371 MDNode *N = MDPair.second;
3372 // Note, essentially every kind of metadata should be preserved here! This
3373 // routine is supposed to clone a load instruction changing *only its type*.
3374 // The only metadata it makes sense to drop is metadata which is invalidated
3375 // when the pointer type changes. This should essentially never be the case
3376 // in LLVM, but we explicitly switch over only known metadata to be
3377 // conservatively correct. If you are adding metadata to LLVM which pertains
3378 // to loads, you almost certainly want to add it here.
3379 switch (ID) {
3380 case LLVMContext::MD_dbg:
3381 case LLVMContext::MD_tbaa:
3382 case LLVMContext::MD_prof:
3383 case LLVMContext::MD_fpmath:
3384 case LLVMContext::MD_tbaa_struct:
3385 case LLVMContext::MD_invariant_load:
3386 case LLVMContext::MD_alias_scope:
3387 case LLVMContext::MD_noalias:
3388 case LLVMContext::MD_nontemporal:
3389 case LLVMContext::MD_mem_parallel_loop_access:
3390 case LLVMContext::MD_access_group:
3391 case LLVMContext::MD_noundef:
3392 // All of these directly apply.
3393 Dest.setMetadata(KindID: ID, Node: N);
3394 break;
3395
3396 case LLVMContext::MD_nonnull:
3397 copyNonnullMetadata(OldLI: Source, N, NewLI&: Dest);
3398 break;
3399
3400 case LLVMContext::MD_align:
3401 case LLVMContext::MD_dereferenceable:
3402 case LLVMContext::MD_dereferenceable_or_null:
3403 // These only directly apply if the new type is also a pointer.
3404 if (NewType->isPointerTy())
3405 Dest.setMetadata(KindID: ID, Node: N);
3406 break;
3407
3408 case LLVMContext::MD_range:
3409 copyRangeMetadata(DL, OldLI: Source, N, NewLI&: Dest);
3410 break;
3411 }
3412 }
3413}
3414
3415void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
3416 auto *ReplInst = dyn_cast<Instruction>(Val: Repl);
3417 if (!ReplInst)
3418 return;
3419
3420 // Patch the replacement so that it is not more restrictive than the value
3421 // being replaced.
3422 WithOverflowInst *UnusedWO;
3423 // When replacing the result of a llvm.*.with.overflow intrinsic with a
3424 // overflowing binary operator, nuw/nsw flags may no longer hold.
3425 if (isa<OverflowingBinaryOperator>(Val: ReplInst) &&
3426 match(V: I, P: m_ExtractValue<0>(V: m_WithOverflowInst(I&: UnusedWO))))
3427 ReplInst->dropPoisonGeneratingFlags();
3428 // Note that if 'I' is a load being replaced by some operation,
3429 // for example, by an arithmetic operation, then andIRFlags()
3430 // would just erase all math flags from the original arithmetic
3431 // operation, which is clearly not wanted and not needed.
3432 else if (!isa<LoadInst>(Val: I))
3433 ReplInst->andIRFlags(V: I);
3434
3435 // FIXME: If both the original and replacement value are part of the
3436 // same control-flow region (meaning that the execution of one
3437 // guarantees the execution of the other), then we can combine the
3438 // noalias scopes here and do better than the general conservative
3439 // answer used in combineMetadata().
3440
3441 // In general, GVN unifies expressions over different control-flow
3442 // regions, and so we need a conservative combination of the noalias
3443 // scopes.
3444 combineMetadataForCSE(K: ReplInst, J: I, KDominatesJ: false);
3445}
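// Illustrative scenario (assumed IR): when GVN keeps
//
//   %repl = add nuw nsw i32 %x, %y
//
// as the replacement for an equivalent but flag-free `add i32 %x, %y`, the
// call to andIRFlags above strips nuw/nsw from %repl, since the surviving
// value now also stands for the computation that made no overflow promises;
// metadata is then merged conservatively via combineMetadataForCSE.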
3446
3447template <typename RootType, typename ShouldReplaceFn>
3448static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3449 const RootType &Root,
3450 const ShouldReplaceFn &ShouldReplace) {
3451 assert(From->getType() == To->getType());
3452
3453 unsigned Count = 0;
3454 for (Use &U : llvm::make_early_inc_range(Range: From->uses())) {
3455 if (!ShouldReplace(Root, U))
3456 continue;
3457 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3458 From->printAsOperand(dbgs());
3459 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3460 U.set(To);
3461 ++Count;
3462 }
3463 return Count;
3464}
3465
3466unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
3467 assert(From->getType() == To->getType());
3468 auto *BB = From->getParent();
3469 unsigned Count = 0;
3470
3471 for (Use &U : llvm::make_early_inc_range(Range: From->uses())) {
3472 auto *I = cast<Instruction>(Val: U.getUser());
3473 if (I->getParent() == BB)
3474 continue;
3475 U.set(To);
3476 ++Count;
3477 }
3478 return Count;
3479}
3480
3481unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3482 DominatorTree &DT,
3483 const BasicBlockEdge &Root) {
3484 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
3485 return DT.dominates(BBE: Root, U);
3486 };
3487 return ::replaceDominatedUsesWith(From, To, Root, ShouldReplace: Dominates);
3488}
3489
3490unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3491 DominatorTree &DT,
3492 const BasicBlock *BB) {
3493 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
3494 return DT.dominates(BB, U);
3495 };
3496 return ::replaceDominatedUsesWith(From, To, Root: BB, ShouldReplace: Dominates);
3497}
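// Hypothetical usage sketch: after proving that X equals C within a guarded
// block BB, a pass can rewrite only the dominated uses:
//
//   unsigned N = replaceDominatedUsesWith(X, C, DT, BB);
//
// Uses outside BB's dominance region are left untouched, which is what
// distinguishes this from a plain replaceAllUsesWith.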
3498
3499unsigned llvm::replaceDominatedUsesWithIf(
3500 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3501 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3502 auto DominatesAndShouldReplace =
3503 [&DT, &ShouldReplace, To](const BasicBlockEdge &Root, const Use &U) {
3504 return DT.dominates(BBE: Root, U) && ShouldReplace(U, To);
3505 };
3506 return ::replaceDominatedUsesWith(From, To, Root, ShouldReplace: DominatesAndShouldReplace);
3507}
3508
3509unsigned llvm::replaceDominatedUsesWithIf(
3510 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3511 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3512 auto DominatesAndShouldReplace = [&DT, &ShouldReplace,
3513 To](const BasicBlock *BB, const Use &U) {
3514 return DT.dominates(BB, U) && ShouldReplace(U, To);
3515 };
3516 return ::replaceDominatedUsesWith(From, To, Root: BB, ShouldReplace: DominatesAndShouldReplace);
3517}
3518
3519bool llvm::callsGCLeafFunction(const CallBase *Call,
3520 const TargetLibraryInfo &TLI) {
3521 // Check if the function is specifically marked as a gc leaf function.
3522 if (Call->hasFnAttr(Kind: "gc-leaf-function"))
3523 return true;
3524 if (const Function *F = Call->getCalledFunction()) {
3525 if (F->hasFnAttribute(Kind: "gc-leaf-function"))
3526 return true;
3527
3528 if (auto IID = F->getIntrinsicID()) {
3529 // Most LLVM intrinsics do not take safepoints.
3530 return IID != Intrinsic::experimental_gc_statepoint &&
3531 IID != Intrinsic::experimental_deoptimize &&
3532 IID != Intrinsic::memcpy_element_unordered_atomic &&
3533 IID != Intrinsic::memmove_element_unordered_atomic;
3534 }
3535 }
3536
3537 // Lib calls can be materialized by some passes, and won't be
3538 // marked as 'gc-leaf-function.' All available Libcalls are
3539 // GC-leaf.
3540 LibFunc LF;
3541 if (TLI.getLibFunc(CB: *Call, F&: LF)) {
3542 return TLI.has(F: LF);
3543 }
3544
3545 return false;
3546}
3547
3548void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
3549 LoadInst &NewLI) {
3550 auto *NewTy = NewLI.getType();
3551
3552 // This only directly applies if the new type is also a pointer.
3553 if (NewTy->isPointerTy()) {
3554 NewLI.setMetadata(KindID: LLVMContext::MD_nonnull, Node: N);
3555 return;
3556 }
3557
3558 // The only other translation we can do is to integral loads with !range
3559 // metadata.
3560 if (!NewTy->isIntegerTy())
3561 return;
3562
3563 MDBuilder MDB(NewLI.getContext());
3564 const Value *Ptr = OldLI.getPointerOperand();
3565 auto *ITy = cast<IntegerType>(Val: NewTy);
3566 auto *NullInt = ConstantExpr::getPtrToInt(
3567 C: ConstantPointerNull::get(T: cast<PointerType>(Val: Ptr->getType())), Ty: ITy);
3568 auto *NonNullInt = ConstantExpr::getAdd(C1: NullInt, C2: ConstantInt::get(Ty: ITy, V: 1));
3569 NewLI.setMetadata(KindID: LLVMContext::MD_range,
3570 Node: MDB.createRange(Lo: NonNullInt, Hi: NullInt));
3571}
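// Example of the fallback translation above (illustrative): when a load of a
// nonnull pointer is re-typed to i64, the !nonnull tag cannot be kept, so the
// new load instead receives !range !{i64 1, i64 0}, i.e. "any value except
// zero", which expresses the same nonnull fact on an integer load.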
3572
3573void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3574 MDNode *N, LoadInst &NewLI) {
3575 auto *NewTy = NewLI.getType();
3576 // Simply copy the metadata if the type did not change.
3577 if (NewTy == OldLI.getType()) {
3578 NewLI.setMetadata(KindID: LLVMContext::MD_range, Node: N);
3579 return;
3580 }
3581
3582 // Give up unless it is converted to a pointer where there is a single very
3583 // valuable mapping we can do reliably.
3584 // FIXME: It would be nice to propagate this in more ways, but the type
3585 // conversions make it hard.
3586 if (!NewTy->isPointerTy())
3587 return;
3588
3589 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3590 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3591 !getConstantRangeFromMetadata(RangeMD: *N).contains(Val: APInt(BitWidth, 0))) {
3592 MDNode *NN = MDNode::get(Context&: OldLI.getContext(), MDs: std::nullopt);
3593 NewLI.setMetadata(KindID: LLVMContext::MD_nonnull, Node: NN);
3594 }
3595}
3596
3597void llvm::dropDebugUsers(Instruction &I) {
3598 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
3599 SmallVector<DbgVariableRecord *, 1> DPUsers;
3600 findDbgUsers(DbgInsts&: DbgUsers, V: &I, DbgVariableRecords: &DPUsers);
3601 for (auto *DII : DbgUsers)
3602 DII->eraseFromParent();
3603 for (auto *DVR : DPUsers)
3604 DVR->eraseFromParent();
3605}
3606
3607void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
3608 BasicBlock *BB) {
3609  // Since we are moving the instructions out of their basic block, we do not
3610  // retain their original debug locations (DILocations) or debug intrinsic
3611  // instructions.
3612 //
3613 // Doing so would degrade the debugging experience and adversely affect the
3614 // accuracy of profiling information.
3615 //
3616 // Currently, when hoisting the instructions, we take the following actions:
3617 // - Remove their debug intrinsic instructions.
3618 // - Set their debug locations to the values from the insertion point.
3619 //
3620 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3621 // need to be deleted, is because there will not be any instructions with a
3622 // DILocation in either branch left after performing the transformation. We
3623 // can only insert a dbg.value after the two branches are joined again.
3624 //
3625 // See PR38762, PR39243 for more details.
3626 //
3627 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3628 // encode predicated DIExpressions that yield different results on different
3629 // code paths.
3630
3631 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3632 Instruction *I = &*II;
3633 I->dropUBImplyingAttrsAndMetadata();
3634 if (I->isUsedByMetadata())
3635 dropDebugUsers(I&: *I);
3636 // RemoveDIs: drop debug-info too as the following code does.
3637 I->dropDbgRecords();
3638 if (I->isDebugOrPseudoInst()) {
3639 // Remove DbgInfo and pseudo probe Intrinsics.
3640 II = I->eraseFromParent();
3641 continue;
3642 }
3643 I->setDebugLoc(InsertPt->getDebugLoc());
3644 ++II;
3645 }
3646 DomBlock->splice(ToIt: InsertPt->getIterator(), FromBB: BB, FromBeginIt: BB->begin(),
3647 FromEndIt: BB->getTerminator()->getIterator());
3648}
3649
3650DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
3651 Type &Ty) {
3652 // Create integer constant expression.
3653 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3654 const APInt &API = cast<ConstantInt>(Val: &CV)->getValue();
3655 std::optional<int64_t> InitIntOpt = API.trySExtValue();
3656 return InitIntOpt ? DIB.createConstantValueExpression(
3657 Val: static_cast<uint64_t>(*InitIntOpt))
3658 : nullptr;
3659 };
3660
3661 if (isa<ConstantInt>(Val: C))
3662 return createIntegerExpression(C);
3663
3664 auto *FP = dyn_cast<ConstantFP>(Val: &C);
3665 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3666 const APFloat &APF = FP->getValueAPF();
3667 APInt const &API = APF.bitcastToAPInt();
3668 if (auto Temp = API.getZExtValue())
3669 return DIB.createConstantValueExpression(Val: static_cast<uint64_t>(Temp));
3670 return DIB.createConstantValueExpression(Val: *API.getRawData());
3671 }
3672
3673 if (!Ty.isPointerTy())
3674 return nullptr;
3675
3676 if (isa<ConstantPointerNull>(Val: C))
3677 return DIB.createConstantValueExpression(Val: 0);
3678
3679 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Val: &C))
3680 if (CE->getOpcode() == Instruction::IntToPtr) {
3681 const Value *V = CE->getOperand(i_nocapture: 0);
3682 if (auto CI = dyn_cast_or_null<ConstantInt>(Val: V))
3683 return createIntegerExpression(*CI);
3684 }
3685 return nullptr;
3686}
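// Illustrative results (assumption): a ConstantInt 42 becomes a constant-value
// DIExpression holding 42, a ConstantFP is encoded from its bit pattern, a
// null pointer becomes the constant 0, and anything else (e.g. the address of
// a global) yields nullptr so the caller emits no constant location.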
3687
3688namespace {
3689
3690/// A potential constituent of a bitreverse or bswap expression. See
3691/// collectBitParts for a fuller explanation.
3692struct BitPart {
3693 BitPart(Value *P, unsigned BW) : Provider(P) {
3694 Provenance.resize(N: BW);
3695 }
3696
3697 /// The Value that this is a bitreverse/bswap of.
3698 Value *Provider;
3699
3700 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3701 /// in Provider becomes bit B in the result of this expression.
3702 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3703
3704 enum { Unset = -1 };
3705};
3706
3707} // end anonymous namespace
3708
3709/// Analyze the specified subexpression and see if it is capable of providing
3710/// pieces of a bswap or bitreverse. The subexpression provides a potential
3711/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3712/// the output of the expression came from a corresponding bit in some other
3713/// value. This function is recursive, and the end result is a mapping of
3714/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3715/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3716///
3717/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3718/// that the expression deposits the low byte of %X into the high byte of the
3719/// result and that all other bits are zero. This expression is accepted and a
3720/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3721/// [0-7].
3722///
3723/// For vector types, all analysis is performed at the per-element level. No
3724/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3725/// constant masks must be splatted across all elements.
3726///
3727/// To avoid revisiting values, the BitPart results are memoized into the
3728/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3729/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3730/// store BitParts objects, not pointers. As we need the concept of a nullptr
3731/// BitParts (Value has been analyzed and the analysis failed), we use an
3732/// optional type instead to provide the same functionality.
3733///
3734/// Because we pass around references into \c BPS, we must use a container that
3735/// does not invalidate internal references (std::map instead of DenseMap).
3736static const std::optional<BitPart> &
3737collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3738 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3739 bool &FoundRoot) {
3740 auto I = BPS.find(x: V);
3741 if (I != BPS.end())
3742 return I->second;
3743
3744 auto &Result = BPS[V] = std::nullopt;
3745 auto BitWidth = V->getType()->getScalarSizeInBits();
3746
3747 // Can't do integer/elements > 128 bits.
3748 if (BitWidth > 128)
3749 return Result;
3750
3751 // Prevent stack overflow by limiting the recursion depth
3752 if (Depth == BitPartRecursionMaxDepth) {
3753 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3754 return Result;
3755 }
3756
3757 if (auto *I = dyn_cast<Instruction>(Val: V)) {
3758 Value *X, *Y;
3759 const APInt *C;
3760
3761 // If this is an or instruction, it may be an inner node of the bswap.
3762 if (match(V, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y)))) {
3763 // Check we have both sources and they are from the same provider.
3764 const auto &A = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
3765 Depth: Depth + 1, FoundRoot);
3766 if (!A || !A->Provider)
3767 return Result;
3768
3769 const auto &B = collectBitParts(V: Y, MatchBSwaps, MatchBitReversals, BPS,
3770 Depth: Depth + 1, FoundRoot);
3771 if (!B || A->Provider != B->Provider)
3772 return Result;
3773
3774 // Try and merge the two together.
3775 Result = BitPart(A->Provider, BitWidth);
3776 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3777 if (A->Provenance[BitIdx] != BitPart::Unset &&
3778 B->Provenance[BitIdx] != BitPart::Unset &&
3779 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3780 return Result = std::nullopt;
3781
3782 if (A->Provenance[BitIdx] == BitPart::Unset)
3783 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3784 else
3785 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3786 }
3787
3788 return Result;
3789 }
3790
3791 // If this is a logical shift by a constant, recurse then shift the result.
3792 if (match(V, P: m_LogicalShift(L: m_Value(V&: X), R: m_APInt(Res&: C)))) {
3793 const APInt &BitShift = *C;
3794
3795 // Ensure the shift amount is defined.
3796 if (BitShift.uge(RHS: BitWidth))
3797 return Result;
3798
3799 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3800 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3801 return Result;
3802
3803 const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
3804 Depth: Depth + 1, FoundRoot);
3805 if (!Res)
3806 return Result;
3807 Result = Res;
3808
3809 // Perform the "shift" on BitProvenance.
3810 auto &P = Result->Provenance;
3811 if (I->getOpcode() == Instruction::Shl) {
3812 P.erase(CS: std::prev(x: P.end(), n: BitShift.getZExtValue()), CE: P.end());
3813 P.insert(I: P.begin(), NumToInsert: BitShift.getZExtValue(), Elt: BitPart::Unset);
3814 } else {
3815 P.erase(CS: P.begin(), CE: std::next(x: P.begin(), n: BitShift.getZExtValue()));
3816 P.insert(I: P.end(), NumToInsert: BitShift.getZExtValue(), Elt: BitPart::Unset);
3817 }
3818
3819 return Result;
3820 }
3821
3822 // If this is a logical 'and' with a mask that clears bits, recurse then
3823 // unset the appropriate bits.
3824 if (match(V, P: m_And(L: m_Value(V&: X), R: m_APInt(Res&: C)))) {
3825 const APInt &AndMask = *C;
3826
3827 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3828 // early exit.
3829 unsigned NumMaskedBits = AndMask.popcount();
3830 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3831 return Result;
3832
3833 const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
3834 Depth: Depth + 1, FoundRoot);
3835 if (!Res)
3836 return Result;
3837 Result = Res;
3838
3839 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3840 // If the AndMask is zero for this bit, clear the bit.
3841 if (AndMask[BitIdx] == 0)
3842 Result->Provenance[BitIdx] = BitPart::Unset;
3843 return Result;
3844 }
3845
3846 // If this is a zext instruction zero extend the result.
3847 if (match(V, P: m_ZExt(Op: m_Value(V&: X)))) {
3848 const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
3849 Depth: Depth + 1, FoundRoot);
3850 if (!Res)
3851 return Result;
3852
3853 Result = BitPart(Res->Provider, BitWidth);
3854 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3855 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3856 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3857 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3858 Result->Provenance[BitIdx] = BitPart::Unset;
3859 return Result;
3860 }
3861
3862 // If this is a truncate instruction, extract the lower bits.
3863 if (match(V, P: m_Trunc(Op: m_Value(V&: X)))) {
3864 const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
3865 Depth: Depth + 1, FoundRoot);
3866 if (!Res)
3867 return Result;
3868
3869 Result = BitPart(Res->Provider, BitWidth);
3870 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3871 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3872 return Result;
3873 }
3874
3875    // BITREVERSE - most likely due to us previously matching a partial
3876 // bitreverse.
3877 if (match(V, P: m_BitReverse(Op0: m_Value(V&: X)))) {
3878 const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
3879 Depth: Depth + 1, FoundRoot);
3880 if (!Res)
3881 return Result;
3882
3883 Result = BitPart(Res->Provider, BitWidth);
3884 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3885 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3886 return Result;
3887 }
3888
3889    // BSWAP - most likely due to us previously matching a partial bswap.
3890 if (match(V, P: m_BSwap(Op0: m_Value(V&: X)))) {
3891 const auto &Res = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
3892 Depth: Depth + 1, FoundRoot);
3893 if (!Res)
3894 return Result;
3895
3896 unsigned ByteWidth = BitWidth / 8;
3897 Result = BitPart(Res->Provider, BitWidth);
3898 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3899 unsigned ByteBitOfs = ByteIdx * 8;
3900 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3901 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3902 Res->Provenance[ByteBitOfs + BitIdx];
3903 }
3904 return Result;
3905 }
3906
3907 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3908 // amount (modulo).
3909 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3910 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3911 if (match(V, P: m_FShl(Op0: m_Value(V&: X), Op1: m_Value(V&: Y), Op2: m_APInt(Res&: C))) ||
3912 match(V, P: m_FShr(Op0: m_Value(V&: X), Op1: m_Value(V&: Y), Op2: m_APInt(Res&: C)))) {
3913 // We can treat fshr as a fshl by flipping the modulo amount.
3914 unsigned ModAmt = C->urem(RHS: BitWidth);
3915 if (cast<IntrinsicInst>(Val: I)->getIntrinsicID() == Intrinsic::fshr)
3916 ModAmt = BitWidth - ModAmt;
3917
3918 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3919 if (!MatchBitReversals && (ModAmt % 8) != 0)
3920 return Result;
3921
3922 // Check we have both sources and they are from the same provider.
3923 const auto &LHS = collectBitParts(V: X, MatchBSwaps, MatchBitReversals, BPS,
3924 Depth: Depth + 1, FoundRoot);
3925 if (!LHS || !LHS->Provider)
3926 return Result;
3927
3928 const auto &RHS = collectBitParts(V: Y, MatchBSwaps, MatchBitReversals, BPS,
3929 Depth: Depth + 1, FoundRoot);
3930 if (!RHS || LHS->Provider != RHS->Provider)
3931 return Result;
3932
3933 unsigned StartBitRHS = BitWidth - ModAmt;
3934 Result = BitPart(LHS->Provider, BitWidth);
3935 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3936 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3937 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3938 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3939 return Result;
3940 }
3941 }
3942
3943 // If we've already found a root input value then we're never going to merge
3944 // these back together.
3945 if (FoundRoot)
3946 return Result;
3947
3948 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3949 // be the root input value to the bswap/bitreverse.
3950 FoundRoot = true;
3951 Result = BitPart(V, BitWidth);
3952 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3953 Result->Provenance[BitIdx] = BitIdx;
3954 return Result;
3955}
3956
3957static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3958 unsigned BitWidth) {
3959 if (From % 8 != To % 8)
3960 return false;
3961 // Convert from bit indices to byte indices and check for a byte reversal.
3962 From >>= 3;
3963 To >>= 3;
3964 BitWidth >>= 3;
3965 return From == BitWidth - To - 1;
3966}
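// Worked example (illustrative): for a 32-bit bswap, input bit 5 must become
// result bit 29. Both are bit 5 within their byte (5 % 8 == 29 % 8), and input
// byte 0 maps to result byte 3 (0 == 4 - 3 - 1), so the check succeeds.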
3967
3968static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3969 unsigned BitWidth) {
3970 return From == BitWidth - To - 1;
3971}
3972
3973bool llvm::recognizeBSwapOrBitReverseIdiom(
3974 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3975 SmallVectorImpl<Instruction *> &InsertedInsts) {
3976 if (!match(V: I, P: m_Or(L: m_Value(), R: m_Value())) &&
3977 !match(V: I, P: m_FShl(Op0: m_Value(), Op1: m_Value(), Op2: m_Value())) &&
3978 !match(V: I, P: m_FShr(Op0: m_Value(), Op1: m_Value(), Op2: m_Value())) &&
3979 !match(V: I, P: m_BSwap(Op0: m_Value())))
3980 return false;
3981 if (!MatchBSwaps && !MatchBitReversals)
3982 return false;
3983 Type *ITy = I->getType();
3984 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3985 return false; // Can't do integer/elements > 128 bits.
3986
3987 // Try to find all the pieces corresponding to the bswap.
3988 bool FoundRoot = false;
3989 std::map<Value *, std::optional<BitPart>> BPS;
3990 const auto &Res =
3991 collectBitParts(V: I, MatchBSwaps, MatchBitReversals, BPS, Depth: 0, FoundRoot);
3992 if (!Res)
3993 return false;
3994 ArrayRef<int8_t> BitProvenance = Res->Provenance;
3995 assert(all_of(BitProvenance,
3996 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3997 "Illegal bit provenance index");
3998
3999 // If the upper bits are zero, then attempt to perform as a truncated op.
4000 Type *DemandedTy = ITy;
4001 if (BitProvenance.back() == BitPart::Unset) {
4002 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
4003 BitProvenance = BitProvenance.drop_back();
4004 if (BitProvenance.empty())
4005 return false; // TODO - handle null value?
4006 DemandedTy = Type::getIntNTy(C&: I->getContext(), N: BitProvenance.size());
4007 if (auto *IVecTy = dyn_cast<VectorType>(Val: ITy))
4008 DemandedTy = VectorType::get(ElementType: DemandedTy, Other: IVecTy);
4009 }
4010
4011 // Check BitProvenance hasn't found a source larger than the result type.
4012 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
4013 if (DemandedBW > ITy->getScalarSizeInBits())
4014 return false;
4015
4016 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
4017 // only byteswap values with an even number of bytes.
4018 APInt DemandedMask = APInt::getAllOnes(numBits: DemandedBW);
4019 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
4020 bool OKForBitReverse = MatchBitReversals;
4021 for (unsigned BitIdx = 0;
4022 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
4023 if (BitProvenance[BitIdx] == BitPart::Unset) {
4024 DemandedMask.clearBit(BitPosition: BitIdx);
4025 continue;
4026 }
4027 OKForBSwap &= bitTransformIsCorrectForBSwap(From: BitProvenance[BitIdx], To: BitIdx,
4028 BitWidth: DemandedBW);
4029 OKForBitReverse &= bitTransformIsCorrectForBitReverse(From: BitProvenance[BitIdx],
4030 To: BitIdx, BitWidth: DemandedBW);
4031 }
4032
4033 Intrinsic::ID Intrin;
4034 if (OKForBSwap)
4035 Intrin = Intrinsic::bswap;
4036 else if (OKForBitReverse)
4037 Intrin = Intrinsic::bitreverse;
4038 else
4039 return false;
4040
4041 Function *F = Intrinsic::getDeclaration(M: I->getModule(), id: Intrin, Tys: DemandedTy);
4042 Value *Provider = Res->Provider;
4043
4044 // We may need to truncate the provider.
4045 if (DemandedTy != Provider->getType()) {
4046 auto *Trunc =
4047 CastInst::CreateIntegerCast(S: Provider, Ty: DemandedTy, isSigned: false, Name: "trunc", InsertBefore: I->getIterator());
4048 InsertedInsts.push_back(Elt: Trunc);
4049 Provider = Trunc;
4050 }
4051
4052 Instruction *Result = CallInst::Create(Func: F, Args: Provider, NameStr: "rev", InsertBefore: I->getIterator());
4053 InsertedInsts.push_back(Elt: Result);
4054
4055 if (!DemandedMask.isAllOnes()) {
4056 auto *Mask = ConstantInt::get(Ty: DemandedTy, V: DemandedMask);
4057 Result = BinaryOperator::Create(Op: Instruction::And, S1: Result, S2: Mask, Name: "mask", InsertBefore: I->getIterator());
4058 InsertedInsts.push_back(Elt: Result);
4059 }
4060
4061 // We may need to zeroextend back to the result type.
4062 if (ITy != Result->getType()) {
4063 auto *ExtInst = CastInst::CreateIntegerCast(S: Result, Ty: ITy, isSigned: false, Name: "zext", InsertBefore: I->getIterator());
4064 InsertedInsts.push_back(Elt: ExtInst);
4065 }
4066
4067 return true;
4068}
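// Illustrative idiom this recognizes (assumed IR): for an i16 value,
//
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %r  = or i16 %hi, %lo
//
// is matched and a `call i16 @llvm.bswap.i16(i16 %x)` is inserted before %r
// and appended to InsertedInsts; the caller then replaces the uses of %r and
// cleans up the now-dead shifts.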
4069
4070// CodeGen has special handling for some string functions that may replace
4071// them with target-specific intrinsics. Since that'd skip our interceptors
4072// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
4073// we mark affected calls as NoBuiltin, which will disable optimization
4074// in CodeGen.
4075void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
4076 CallInst *CI, const TargetLibraryInfo *TLI) {
4077 Function *F = CI->getCalledFunction();
4078 LibFunc Func;
4079 if (F && !F->hasLocalLinkage() && F->hasName() &&
4080 TLI->getLibFunc(funcName: F->getName(), F&: Func) && TLI->hasOptimizedCodeGen(F: Func) &&
4081 !F->doesNotAccessMemory())
4082 CI->addFnAttr(Attribute::NoBuiltin);
4083}
4084
4085bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
4086 // We can't have a PHI with a metadata type.
4087 if (I->getOperand(i: OpIdx)->getType()->isMetadataTy())
4088 return false;
4089
4090 // Early exit.
4091 if (!isa<Constant>(Val: I->getOperand(i: OpIdx)))
4092 return true;
4093
4094 switch (I->getOpcode()) {
4095 default:
4096 return true;
4097 case Instruction::Call:
4098 case Instruction::Invoke: {
4099 const auto &CB = cast<CallBase>(Val: *I);
4100
4101 // Can't handle inline asm. Skip it.
4102 if (CB.isInlineAsm())
4103 return false;
4104
4105 // Constant bundle operands may need to retain their constant-ness for
4106 // correctness.
4107 if (CB.isBundleOperand(Idx: OpIdx))
4108 return false;
4109
4110 if (OpIdx < CB.arg_size()) {
4111 // Some variadic intrinsics require constants in the variadic arguments,
4112 // which currently aren't markable as immarg.
4113 if (isa<IntrinsicInst>(Val: CB) &&
4114 OpIdx >= CB.getFunctionType()->getNumParams()) {
4115 // This is known to be OK for stackmap.
4116 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
4117 }
4118
4119 // gcroot is a special case, since it requires a constant argument which
4120 // isn't also required to be a simple ConstantInt.
4121 if (CB.getIntrinsicID() == Intrinsic::gcroot)
4122 return false;
4123
4124 // Some intrinsic operands are required to be immediates.
4125 return !CB.paramHasAttr(ArgNo: OpIdx, Attribute::Kind: ImmArg);
4126 }
4127
4128 // It is never allowed to replace the call argument to an intrinsic, but it
4129 // may be possible for a call.
4130 return !isa<IntrinsicInst>(Val: CB);
4131 }
4132 case Instruction::ShuffleVector:
4133 // Shufflevector masks are constant.
4134 return OpIdx != 2;
4135 case Instruction::Switch:
4136 case Instruction::ExtractValue:
4137 // All operands apart from the first are constant.
4138 return OpIdx == 0;
4139 case Instruction::InsertValue:
4140 // All operands apart from the first and the second are constant.
4141 return OpIdx < 2;
4142 case Instruction::Alloca:
4143 // Static allocas (constant size in the entry block) are handled by
4144 // prologue/epilogue insertion so they're free anyway. We definitely don't
4145 // want to make them non-constant.
4146 return !cast<AllocaInst>(Val: I)->isStaticAlloca();
4147 case Instruction::GetElementPtr:
4148 if (OpIdx == 0)
4149 return true;
4150 gep_type_iterator It = gep_type_begin(GEP: I);
4151 for (auto E = std::next(x: It, n: OpIdx); It != E; ++It)
4152 if (It.isStruct())
4153 return false;
4154 return true;
4155 }
4156}
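// Quick examples (illustrative): the mask operand of a shufflevector, an
// immarg intrinsic argument and a struct-field index of a GEP all report
// false here, while the pointer operand of that same GEP or an ordinary call
// argument reports true.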
4157
4158Value *llvm::invertCondition(Value *Condition) {
4159 // First: Check if it's a constant
4160 if (Constant *C = dyn_cast<Constant>(Val: Condition))
4161 return ConstantExpr::getNot(C);
4162
4163 // Second: If the condition is already inverted, return the original value
4164 Value *NotCondition;
4165 if (match(V: Condition, P: m_Not(V: m_Value(V&: NotCondition))))
4166 return NotCondition;
4167
4168 BasicBlock *Parent = nullptr;
4169 Instruction *Inst = dyn_cast<Instruction>(Val: Condition);
4170 if (Inst)
4171 Parent = Inst->getParent();
4172 else if (Argument *Arg = dyn_cast<Argument>(Val: Condition))
4173 Parent = &Arg->getParent()->getEntryBlock();
4174 assert(Parent && "Unsupported condition to invert");
4175
4176 // Third: Check all the users for an invert
4177 for (User *U : Condition->users())
4178 if (Instruction *I = dyn_cast<Instruction>(Val: U))
4179 if (I->getParent() == Parent && match(V: I, P: m_Not(V: m_Specific(V: Condition))))
4180 return I;
4181
4182 // Last option: Create a new instruction
4183 auto *Inverted =
4184 BinaryOperator::CreateNot(Op: Condition, Name: Condition->getName() + ".inv");
4185 if (Inst && !isa<PHINode>(Val: Inst))
4186 Inverted->insertAfter(InsertPos: Inst);
4187 else
4188 Inverted->insertBefore(InsertPos: &*Parent->getFirstInsertionPt());
4189 return Inverted;
4190}
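// Illustrative behaviour (assumption): invertCondition(%c) folds constants
// directly, returns an existing `xor i1 %c, true` from the right block if one
// is found, and otherwise materialises a fresh not named "%c.inv" near the
// definition of %c.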
4191
4192bool llvm::inferAttributesFromOthers(Function &F) {
4193 // Note: We explicitly check for attributes rather than using cover functions
4194 // because some of the cover functions include the logic being implemented.
4195
4196 bool Changed = false;
4197 // readnone + not convergent implies nosync
4198 if (!F.hasFnAttribute(Attribute::NoSync) &&
4199 F.doesNotAccessMemory() && !F.isConvergent()) {
4200 F.setNoSync();
4201 Changed = true;
4202 }
4203
4204 // readonly implies nofree
4205 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
4206 F.setDoesNotFreeMemory();
4207 Changed = true;
4208 }
4209
4210 // willreturn implies mustprogress
4211 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
4212 F.setMustProgress();
4213 Changed = true;
4214 }
4215
4216 // TODO: There are a bunch of cases of restrictive memory effects we
4217 // can infer by inspecting arguments of argmemonly-ish functions.
4218
4219 return Changed;
4220}
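// Example (illustrative): a function declared `readnone willreturn` that is
// not convergent picks up nosync, nofree and mustprogress here, since each is
// implied by attributes the function already carries.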
4221
