//===- InstCombineNegator.cpp -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements sinking of negation into expression trees,
// as long as that can be done without increasing instruction count.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <type_traits>
#include <utility>

namespace llvm {
class DataLayout;
class LLVMContext;
} // namespace llvm

using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NegatorTotalNegationsAttempted,
          "Negator: Number of negations attempted to be sunk");
STATISTIC(NegatorNumTreesNegated,
          "Negator: Number of negations successfully sunk");
STATISTIC(NegatorMaxDepthVisited, "Negator: Maximal traversal depth ever "
                                  "reached while attempting to sink negation");
STATISTIC(NegatorTimesDepthLimitReached,
          "Negator: How many times the traversal depth limit was reached "
          "during sinking");
STATISTIC(
    NegatorNumValuesVisited,
    "Negator: Total number of values visited during attempts to sink negation");
STATISTIC(NegatorNumNegationsFoundInCache,
          "Negator: How many negations did we retrieve/reuse from cache");
STATISTIC(NegatorMaxTotalValuesVisited,
          "Negator: Maximal number of values ever visited while attempting to "
          "sink negation");
STATISTIC(NegatorNumInstructionsCreatedTotal,
          "Negator: Number of new negated instructions created, total");
STATISTIC(NegatorMaxInstructionsCreated,
          "Negator: Maximal number of new instructions created during negation "
          "attempt");
STATISTIC(NegatorNumInstructionsNegatedSuccess,
          "Negator: Number of new negated instructions created in successful "
          "negation sinking attempts");

DEBUG_COUNTER(NegatorCounter, "instcombine-negator",
              "Controls Negator transformations in InstCombine pass");

static cl::opt<bool>
    NegatorEnabled("instcombine-negator-enabled", cl::init(true),
                   cl::desc("Should we attempt to sink negations?"));

static cl::opt<unsigned>
    NegatorMaxDepth("instcombine-negator-max-depth",
                    cl::init(NegatorDefaultMaxDepth),
                    cl::desc("What is the maximal lookup depth when trying to "
                             "check for viability of negation sinking."));

Negator::Negator(LLVMContext &C, const DataLayout &DL, bool IsTrulyNegation_)
    : Builder(C, TargetFolder(DL),
              IRBuilderCallbackInserter([&](Instruction *I) {
                ++NegatorNumInstructionsCreatedTotal;
                NewInstructions.push_back(I);
              })),
      IsTrulyNegation(IsTrulyNegation_) {}

#if LLVM_ENABLE_STATS
Negator::~Negator() {
  NegatorMaxTotalValuesVisited.updateMax(NumValuesVisitedInThisNegator);
}
#endif

// Due to the InstCombine's worklist management, there are no guarantees that
// each instruction we'll encounter has been visited by InstCombine already.
// In particular, most importantly for us, that means we have to canonicalize
// constants to RHS ourselves, since that is helpful sometimes.
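// (E.g. we may be looking at `add i8 1, %x` that has not yet been
// canonicalized to `add i8 %x, 1`.)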
std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
  assert(I->getNumOperands() == 2 && "Only for binops!");
  std::array<Value *, 2> Ops{I->getOperand(0), I->getOperand(1)};
  if (I->isCommutative() && InstCombiner::getComplexity(I->getOperand(0)) <
                                InstCombiner::getComplexity(I->getOperand(1)))
    std::swap(Ops[0], Ops[1]);
  return Ops;
}

// FIXME: can this be reworked into a worklist-based algorithm while preserving
// the depth-first, early bailout traversal?
[[nodiscard]] Value *Negator::visitImpl(Value *V, bool IsNSW, unsigned Depth) {
  // -(undef) -> undef.
  if (match(V, m_Undef()))
    return V;

  // In i1, negation can simply be ignored.
  if (V->getType()->isIntOrIntVectorTy(1))
    return V;

  Value *X;

  // -(-(X)) -> X.
  if (match(V, m_Neg(m_Value(X))))
    return X;

  // Integral constants can be freely negated.
  if (match(V, m_AnyIntegralConstant()))
    return ConstantExpr::getNeg(cast<Constant>(V),
                                /*HasNSW=*/false);

  // If we have a non-instruction, then give up.
  if (!isa<Instruction>(V))
    return nullptr;

  // If we have started with a true negation (i.e. `sub 0, %y`), then if we've
  // got an instruction that does not require recursive reasoning, we can still
  // negate it even if it has other uses, without increasing instruction count.
  if (!V->hasOneUse() && !IsTrulyNegation)
    return nullptr;

  auto *I = cast<Instruction>(V);
  unsigned BitWidth = I->getType()->getScalarSizeInBits();

  // We must preserve the insertion point and debug info that is set in the
  // builder at the time this function is called.
  InstCombiner::BuilderTy::InsertPointGuard Guard(Builder);
  // And since we are trying to negate instruction I, that tells us about the
  // insertion point and the debug info that we need to keep.
  Builder.SetInsertPoint(I);

  // In some cases we can give the answer without further recursion.
  switch (I->getOpcode()) {
  case Instruction::Add: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `inc` is always negatible.
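    // (-(X + 1) == -X - 1 == ~X, so an increment negates to a `not` for free.)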
    if (match(Ops[1], m_One()))
      return Builder.CreateNot(Ops[0], I->getName() + ".neg");
    break;
  }
  case Instruction::Xor:
    // `not` is always negatible.
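    // (-(~X) == -(-X - 1) == X + 1.)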
    if (match(I, m_Not(m_Value(X))))
      return Builder.CreateAdd(X, ConstantInt::get(X->getType(), 1),
                               I->getName() + ".neg");
    break;
  case Instruction::AShr:
  case Instruction::LShr: {
    // Right-shift sign bit smear is negatible.
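    // (`ashr X, BW-1` is 0 or -1 and `lshr X, BW-1` is 0 or 1; each is the
    // negation of the other, so we only need to swap the shift opcode.)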
    const APInt *Op1Val;
    if (match(I->getOperand(1), m_APInt(Op1Val)) && *Op1Val == BitWidth - 1) {
      Value *BO = I->getOpcode() == Instruction::AShr
                      ? Builder.CreateLShr(I->getOperand(0), I->getOperand(1))
                      : Builder.CreateAShr(I->getOperand(0), I->getOperand(1));
      if (auto *NewInstr = dyn_cast<Instruction>(BO)) {
        NewInstr->copyIRFlags(I);
        NewInstr->setName(I->getName() + ".neg");
      }
      return BO;
    }
    // While we could negate exact arithmetic shift:
    // ashr exact %x, C --> sdiv exact i8 %x, -1<<C
    // iff C != 0 and C u< bitwidth(%x), we don't want to,
    // because division is *THAT* much worse than a shift.
    break;
  }
  case Instruction::SExt:
  case Instruction::ZExt:
    // `*ext` of i1 is always negatible
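    // (`sext i1 X` is 0 or -1 and `zext i1 X` is 0 or 1, so negation simply
    // swaps the extension kind.)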
    if (I->getOperand(0)->getType()->isIntOrIntVectorTy(1))
      return I->getOpcode() == Instruction::SExt
                 ? Builder.CreateZExt(I->getOperand(0), I->getType(),
                                      I->getName() + ".neg")
                 : Builder.CreateSExt(I->getOperand(0), I->getType(),
                                      I->getName() + ".neg");
    break;
  case Instruction::Select: {
    // If both arms of the select are constants, we don't need to recurse.
    // Therefore, this transform is not limited by uses.
    auto *Sel = cast<SelectInst>(I);
    Constant *TrueC, *FalseC;
    if (match(Sel->getTrueValue(), m_ImmConstant(TrueC)) &&
        match(Sel->getFalseValue(), m_ImmConstant(FalseC))) {
      Constant *NegTrueC = ConstantExpr::getNeg(TrueC);
      Constant *NegFalseC = ConstantExpr::getNeg(FalseC);
      return Builder.CreateSelect(Sel->getCondition(), NegTrueC, NegFalseC,
                                  I->getName() + ".neg", /*MDFrom=*/I);
    }
    break;
  }
  default:
    break; // Other instructions require recursive reasoning.
  }

  if (I->getOpcode() == Instruction::Sub &&
      (I->hasOneUse() || match(I->getOperand(0), m_ImmConstant()))) {
    // `sub` is always negatible.
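    // (-(A - B) == B - A, i.e. the negation is just the `sub` with its
    // operands swapped.)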
    // However, only do this either if the old `sub` doesn't stick around, or
    // it was subtracting from a constant. Otherwise, this isn't profitable.
    return Builder.CreateSub(I->getOperand(1), I->getOperand(0),
                             I->getName() + ".neg", /* HasNUW */ false,
                             IsNSW && I->hasNoSignedWrap());
  }

  // Some other cases, while they still don't require recursion,
  // are restricted to the one-use case.
  if (!V->hasOneUse())
    return nullptr;

  switch (I->getOpcode()) {
  case Instruction::ZExt: {
    // Negation of zext of signbit is signbit splat:
    // 0 - (zext (i8 X u>> 7) to iN) --> sext (i8 X s>> 7) to iN
    Value *SrcOp = I->getOperand(0);
    unsigned SrcWidth = SrcOp->getType()->getScalarSizeInBits();
    const APInt &FullShift = APInt(SrcWidth, SrcWidth - 1);
    if (IsTrulyNegation &&
        match(SrcOp, m_LShr(m_Value(X), m_SpecificIntAllowPoison(FullShift)))) {
      Value *Ashr = Builder.CreateAShr(X, FullShift);
      return Builder.CreateSExt(Ashr, I->getType());
    }
    break;
  }
  case Instruction::And: {
    Constant *ShAmt;
    // sub(y,and(lshr(x,C),1)) --> add(ashr(shl(x,(BW-1)-C),BW-1),y)
    if (match(I, m_And(m_OneUse(m_TruncOrSelf(
                           m_LShr(m_Value(X), m_ImmConstant(ShAmt)))),
                       m_One()))) {
      unsigned BW = X->getType()->getScalarSizeInBits();
      Constant *BWMinusOne = ConstantInt::get(X->getType(), BW - 1);
      Value *R = Builder.CreateShl(X, Builder.CreateSub(BWMinusOne, ShAmt));
      R = Builder.CreateAShr(R, BWMinusOne);
      return Builder.CreateTruncOrBitCast(R, I->getType());
    }
    break;
  }
  case Instruction::SDiv:
    // `sdiv` is negatible if divisor is not undef/INT_MIN/1.
    // While this is normally not behind a use-check,
    // let's consider division to be special since it's costly.
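    // (-(X /s C) == X /s -C because `sdiv` truncates toward zero; C must not
    // be INT_MIN, whose negation is not representable, nor 1, since the new
    // divisor -1 could produce an overflowing `sdiv INT_MIN, -1`.)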
    if (auto *Op1C = dyn_cast<Constant>(I->getOperand(1))) {
      if (!Op1C->containsUndefOrPoisonElement() &&
          Op1C->isNotMinSignedValue() && Op1C->isNotOneValue()) {
        Value *BO =
            Builder.CreateSDiv(I->getOperand(0), ConstantExpr::getNeg(Op1C),
                               I->getName() + ".neg");
        if (auto *NewInstr = dyn_cast<Instruction>(BO))
          NewInstr->setIsExact(I->isExact());
        return BO;
      }
    }
    break;
  }

  // Rest of the logic is recursive, so if it's time to give up then it's time.
  if (Depth > NegatorMaxDepth) {
    LLVM_DEBUG(dbgs() << "Negator: reached maximal allowed traversal depth in "
                      << *V << ". Giving up.\n");
    ++NegatorTimesDepthLimitReached;
    return nullptr;
  }

  switch (I->getOpcode()) {
  case Instruction::Freeze: {
    // `freeze` is negatible if its operand is negatible.
    Value *NegOp = negate(I->getOperand(0), IsNSW, Depth + 1);
    if (!NegOp) // Early return.
      return nullptr;
    return Builder.CreateFreeze(NegOp, I->getName() + ".neg");
  }
  case Instruction::PHI: {
    // `phi` is negatible if all the incoming values are negatible.
    auto *PHI = cast<PHINode>(I);
    SmallVector<Value *, 4> NegatedIncomingValues(PHI->getNumOperands());
    for (auto I : zip(PHI->incoming_values(), NegatedIncomingValues)) {
      if (!(std::get<1>(I) =
                negate(std::get<0>(I), IsNSW, Depth + 1))) // Early return.
        return nullptr;
    }
    // All incoming values are indeed negatible. Create negated PHI node.
    PHINode *NegatedPHI = Builder.CreatePHI(
        PHI->getType(), PHI->getNumOperands(), PHI->getName() + ".neg");
    for (auto I : zip(NegatedIncomingValues, PHI->blocks()))
      NegatedPHI->addIncoming(std::get<0>(I), std::get<1>(I));
    return NegatedPHI;
  }
  case Instruction::Select: {
    if (isKnownNegation(I->getOperand(1), I->getOperand(2), /*NeedNSW=*/false,
                        /*AllowPoison=*/false)) {
      // If one hand of the select is known to be the negation of the other
      // hand, just swap the hands around.
      auto *NewSelect = cast<SelectInst>(I->clone());
      // Just swap the operands of the select.
      NewSelect->swapValues();
      // Don't swap prof metadata, we didn't change the branch behavior.
      NewSelect->setName(I->getName() + ".neg");
      Builder.Insert(NewSelect);
      return NewSelect;
    }
    // `select` is negatible if both hands of `select` are negatible.
    Value *NegOp1 = negate(I->getOperand(1), IsNSW, Depth + 1);
    if (!NegOp1) // Early return.
      return nullptr;
    Value *NegOp2 = negate(I->getOperand(2), IsNSW, Depth + 1);
    if (!NegOp2)
      return nullptr;
    // Do preserve the metadata!
    return Builder.CreateSelect(I->getOperand(0), NegOp1, NegOp2,
                                I->getName() + ".neg", /*MDFrom=*/I);
  }
  case Instruction::ShuffleVector: {
    // `shufflevector` is negatible if both operands are negatible.
    auto *Shuf = cast<ShuffleVectorInst>(I);
    Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth + 1);
    if (!NegOp0) // Early return.
      return nullptr;
    Value *NegOp1 = negate(I->getOperand(1), IsNSW, Depth + 1);
    if (!NegOp1)
      return nullptr;
    return Builder.CreateShuffleVector(NegOp0, NegOp1, Shuf->getShuffleMask(),
                                       I->getName() + ".neg");
  }
  case Instruction::ExtractElement: {
    // `extractelement` is negatible if source operand is negatible.
    auto *EEI = cast<ExtractElementInst>(I);
    Value *NegVector = negate(EEI->getVectorOperand(), IsNSW, Depth + 1);
    if (!NegVector) // Early return.
      return nullptr;
    return Builder.CreateExtractElement(NegVector, EEI->getIndexOperand(),
                                        I->getName() + ".neg");
  }
  case Instruction::InsertElement: {
    // `insertelement` is negatible if both the source vector and
    // element-to-be-inserted are negatible.
    auto *IEI = cast<InsertElementInst>(I);
    Value *NegVector = negate(IEI->getOperand(0), IsNSW, Depth + 1);
    if (!NegVector) // Early return.
      return nullptr;
    Value *NegNewElt = negate(IEI->getOperand(1), IsNSW, Depth + 1);
    if (!NegNewElt) // Early return.
      return nullptr;
    return Builder.CreateInsertElement(NegVector, NegNewElt, IEI->getOperand(2),
                                       I->getName() + ".neg");
  }
  case Instruction::Trunc: {
    // `trunc` is negatible if its operand is negatible.
    Value *NegOp = negate(I->getOperand(0), /* IsNSW */ false, Depth + 1);
    if (!NegOp) // Early return.
      return nullptr;
    return Builder.CreateTrunc(NegOp, I->getType(), I->getName() + ".neg");
  }
  case Instruction::Shl: {
    // `shl` is negatible if the first operand is negatible.
    IsNSW &= I->hasNoSignedWrap();
    if (Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth + 1))
      return Builder.CreateShl(NegOp0, I->getOperand(1), I->getName() + ".neg",
                               /* HasNUW */ false, IsNSW);
    // Otherwise, `shl %x, C` can be interpreted as `mul %x, 1<<C`.
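    // (I.e. -(X << C) == X * ((-1) << C), so we fold the negation into a
    // negated power-of-two multiplier instead.)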
    auto *Op1C = dyn_cast<Constant>(I->getOperand(1));
    if (!Op1C || !IsTrulyNegation)
      return nullptr;
    return Builder.CreateMul(
        I->getOperand(0),
        ConstantExpr::getShl(Constant::getAllOnesValue(Op1C->getType()), Op1C),
        I->getName() + ".neg", /* HasNUW */ false, IsNSW);
  }
  case Instruction::Or: {
    if (!cast<PossiblyDisjointInst>(I)->isDisjoint())
      return nullptr; // Don't know how to handle `or` in general.
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `or`/`add` are interchangeable when operands have no common bits set.
    // `inc` is always negatible.
    if (match(Ops[1], m_One()))
      return Builder.CreateNot(Ops[0], I->getName() + ".neg");
    // Else, just defer to Instruction::Add handling.
    [[fallthrough]];
  }
  case Instruction::Add: {
    // `add` is negatible if both of its operands are negatible.
    SmallVector<Value *, 2> NegatedOps, NonNegatedOps;
    for (Value *Op : I->operands()) {
      // Can we sink the negation into this operand?
      if (Value *NegOp = negate(Op, /* IsNSW */ false, Depth + 1)) {
        NegatedOps.emplace_back(NegOp); // Successfully negated operand!
        continue;
      }
      // Failed to sink negation into this operand. IFF we started from negation
      // and we manage to sink negation into one operand, we can still do this.
      if (!IsTrulyNegation)
        return nullptr;
      NonNegatedOps.emplace_back(Op); // Just record which operand that was.
    }
    assert((NegatedOps.size() + NonNegatedOps.size()) == 2 &&
           "Internal consistency check failed.");
    // Did we manage to sink negation into both of the operands?
    if (NegatedOps.size() == 2) // Then we get to keep the `add`!
      return Builder.CreateAdd(NegatedOps[0], NegatedOps[1],
                               I->getName() + ".neg");
    assert(IsTrulyNegation && "We should have early-exited then.");
    // Completely failed to sink negation?
    if (NonNegatedOps.size() == 2)
      return nullptr;
    // 0-(a+b) --> (-a)-b
    return Builder.CreateSub(NegatedOps[0], NonNegatedOps[0],
                             I->getName() + ".neg");
  }
  case Instruction::Xor: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `xor` is negatible if one of its operands is invertible.
    // FIXME: InstCombineInverter? But how to connect Inverter and Negator?
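    // (Uses -Y == ~Y + 1: -(X ^ C) == ~(X ^ C) + 1 == (X ^ ~C) + 1.)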
    if (auto *C = dyn_cast<Constant>(Ops[1])) {
      if (IsTrulyNegation) {
        Value *Xor = Builder.CreateXor(Ops[0], ConstantExpr::getNot(C));
        return Builder.CreateAdd(Xor, ConstantInt::get(Xor->getType(), 1),
                                 I->getName() + ".neg");
      }
    }
    return nullptr;
  }
  case Instruction::Mul: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `mul` is negatible if one of its operands is negatible.
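    // (-(X * Y) == (-X) * Y == X * (-Y), so negating either factor suffices.)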
    Value *NegatedOp, *OtherOp;
    // First try the second operand: in case it's a constant, it will be best
    // to just invert it instead of sinking the `neg` deeper.
    if (Value *NegOp1 = negate(Ops[1], /* IsNSW */ false, Depth + 1)) {
      NegatedOp = NegOp1;
      OtherOp = Ops[0];
    } else if (Value *NegOp0 = negate(Ops[0], /* IsNSW */ false, Depth + 1)) {
      NegatedOp = NegOp0;
      OtherOp = Ops[1];
    } else
      // Can't negate either of them.
      return nullptr;
    return Builder.CreateMul(NegatedOp, OtherOp, I->getName() + ".neg",
                             /* HasNUW */ false, IsNSW && I->hasNoSignedWrap());
  }
  default:
    return nullptr; // Don't know, likely not negatible for free.
  }

  llvm_unreachable("Can't get here. We always return from switch.");
}

[[nodiscard]] Value *Negator::negate(Value *V, bool IsNSW, unsigned Depth) {
  NegatorMaxDepthVisited.updateMax(Depth);
  ++NegatorNumValuesVisited;

#if LLVM_ENABLE_STATS
  ++NumValuesVisitedInThisNegator;
#endif

#ifndef NDEBUG
  // We can't ever have a Value with such an address.
  Value *Placeholder = reinterpret_cast<Value *>(static_cast<uintptr_t>(-1));
#endif

  // Did we already try to negate this value?
  auto NegationsCacheIterator = NegationsCache.find(V);
  if (NegationsCacheIterator != NegationsCache.end()) {
    ++NegatorNumNegationsFoundInCache;
    Value *NegatedV = NegationsCacheIterator->second;
    assert(NegatedV != Placeholder && "Encountered a cycle during negation.");
    return NegatedV;
  }

#ifndef NDEBUG
  // We did not find a cached result for negation of V. While there,
  // let's temporarily cache a placeholder value, with the idea that if later
  // during negation we fetch it from cache, we'll know we're in a cycle.
  NegationsCache[V] = Placeholder;
#endif

  // No luck. Try negating it for real.
  Value *NegatedV = visitImpl(V, IsNSW, Depth);
  // And cache the (real) result for the future.
  NegationsCache[V] = NegatedV;

  return NegatedV;
}

[[nodiscard]] std::optional<Negator::Result> Negator::run(Value *Root,
                                                          bool IsNSW) {
  Value *Negated = negate(Root, IsNSW, /*Depth=*/0);
  if (!Negated) {
    // We must cleanup newly-inserted instructions, to avoid any potential
    // endless combine looping.
    for (Instruction *I : llvm::reverse(NewInstructions))
      I->eraseFromParent();
    return std::nullopt;
  }
  return std::make_pair(ArrayRef<Instruction *>(NewInstructions), Negated);
}

[[nodiscard]] Value *Negator::Negate(bool LHSIsZero, bool IsNSW, Value *Root,
                                     InstCombinerImpl &IC) {
  ++NegatorTotalNegationsAttempted;
  LLVM_DEBUG(dbgs() << "Negator: attempting to sink negation into " << *Root
                    << "\n");

  if (!NegatorEnabled || !DebugCounter::shouldExecute(NegatorCounter))
    return nullptr;

  Negator N(Root->getContext(), IC.getDataLayout(), LHSIsZero);
  std::optional<Result> Res = N.run(Root, IsNSW);
  if (!Res) { // Negation failed.
    LLVM_DEBUG(dbgs() << "Negator: failed to sink negation into " << *Root
                      << "\n");
    return nullptr;
  }

  LLVM_DEBUG(dbgs() << "Negator: successfully sunk negation into " << *Root
                    << "\n NEW: " << *Res->second << "\n");
  ++NegatorNumTreesNegated;

  // We must temporarily unset the 'current' insertion point and DebugLoc of the
  // InstCombine's IRBuilder so that it won't interfere with the ones we have
  // already specified when producing negated instructions.
  InstCombiner::BuilderTy::InsertPointGuard Guard(IC.Builder);
  IC.Builder.ClearInsertionPoint();
  IC.Builder.SetCurrentDebugLocation(DebugLoc());

  // And finally, we must add newly-created instructions into the InstCombine's
  // worklist (in a proper order!) so it can attempt to combine them.
  LLVM_DEBUG(dbgs() << "Negator: Propagating " << Res->first.size()
                    << " instrs to InstCombine\n");
  NegatorMaxInstructionsCreated.updateMax(Res->first.size());
  NegatorNumInstructionsNegatedSuccess += Res->first.size();

  // They are in def-use order, so nothing fancy, just insert them in order.
  for (Instruction *I : Res->first)
    IC.Builder.Insert(I, I->getName());

  // And return the new root.
  return Res->second;
}