//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace PatternMatch;

/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to
  // determine if this is safe. For example, the use could be in dynamically
  // unreachable code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
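  // Illustrative IR (value names are for exposition only):
  //   %s = shl i32 1, %a
  //   %v = lshr i32 %s, %b   ; %v is known non-zero here
  // -->
  //   %d = sub i32 %a, %b
  //   %v = shl i32 1, %d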
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact. Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is a
    // non-zero context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      IC.replaceOperand(*I, 0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //    If V is a phi node, we can call this on each of its operands.
  //    "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}

// TODO: This is a specific form of a much more general pattern.
//       We could detect a select with any binop identity constant, or we
//       could use SimplifyBinOp to see if either arm of the select reduces.
//       But that needs to be done carefully and/or while removing potential
//       reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
static Value *foldMulSelectToNegate(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Cond, *OtherOp;

  // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
  // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
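  // Illustrative IR for the first form (value names are for exposition only):
  //   %s = select i1 %c, i32 1, i32 -1
  //   %r = mul i32 %s, %x
  // -->
  //   %n = sub i32 0, %x
  //   %r = select i1 %c, i32 %x, i32 %n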
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
                        m_Value(OtherOp)))) {
    bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
    Value *Neg = Builder.CreateNeg(OtherOp, "", HasAnyNoWrap);
    return Builder.CreateSelect(Cond, OtherOp, Neg);
  }
  // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
  // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
                        m_Value(OtherOp)))) {
    bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
    Value *Neg = Builder.CreateNeg(OtherOp, "", HasAnyNoWrap);
    return Builder.CreateSelect(Cond, Neg, OtherOp);
  }

  // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
  // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
                                           m_SpecificFP(-1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
  }

  // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
  // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
                                           m_SpecificFP(1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
  }

  return nullptr;
}

/// Reduce integer multiplication patterns that contain a (+/-1 << Z) factor.
/// Callers are expected to call this twice to handle commuted patterns.
static Value *foldMulShl1(BinaryOperator &Mul, bool CommuteOperands,
                          InstCombiner::BuilderTy &Builder) {
  Value *X = Mul.getOperand(0), *Y = Mul.getOperand(1);
  if (CommuteOperands)
    std::swap(X, Y);

  const bool HasNSW = Mul.hasNoSignedWrap();
  const bool HasNUW = Mul.hasNoUnsignedWrap();

  // X * (1 << Z) --> X << Z
  Value *Z;
  if (match(Y, m_Shl(m_One(), m_Value(Z)))) {
    bool PropagateNSW = HasNSW && cast<ShlOperator>(Y)->hasNoSignedWrap();
    return Builder.CreateShl(X, Z, Mul.getName(), HasNUW, PropagateNSW);
  }

  // Similar to above, but an increment of the shifted value becomes an add:
  // X * ((1 << Z) + 1) --> (X * (1 << Z)) + X --> (X << Z) + X
  // This increases uses of X, so it may require a freeze, but that is still
  // expected to be an improvement because it removes the multiply.
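  // Illustrative IR (value names are for exposition only):
  //   %p = shl i32 1, %z
  //   %y = add i32 %p, 1
  //   %m = mul i32 %x, %y
  // -->
  //   %f = freeze i32 %x
  //   %s = shl i32 %f, %z
  //   %m = add i32 %s, %f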
  BinaryOperator *Shift;
  if (match(Y, m_OneUse(m_Add(m_BinOp(Shift), m_One()))) &&
      match(Shift, m_OneUse(m_Shl(m_One(), m_Value(Z))))) {
    bool PropagateNSW = HasNSW && Shift->hasNoSignedWrap();
    Value *FrX = Builder.CreateFreeze(X, X->getName() + ".fr");
    Value *Shl = Builder.CreateShl(FrX, Z, "mulshl", HasNUW, PropagateNSW);
    return Builder.CreateAdd(Shl, FrX, Mul.getName(), HasNUW, PropagateNSW);
  }

  // Similar to above, but a decrement of the shifted value is disguised as
  // 'not' and becomes a sub:
  // X * (~(-1 << Z)) --> X * ((1 << Z) - 1) --> (X << Z) - X
  // This increases uses of X, so it may require a freeze, but that is still
  // expected to be an improvement because it removes the multiply.
  if (match(Y, m_OneUse(m_Not(m_OneUse(m_Shl(m_AllOnes(), m_Value(Z))))))) {
    Value *FrX = Builder.CreateFreeze(X, X->getName() + ".fr");
    Value *Shl = Builder.CreateShl(FrX, Z, "mulshl");
    return Builder.CreateSub(Shl, FrX, Mul.getName());
  }

  return nullptr;
}

static Value *takeLog2(IRBuilderBase &Builder, Value *Op, unsigned Depth,
                       bool AssumeNonZero, bool DoFold);

Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V =
          simplifyMulInst(Op0, Op1, I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                          SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  Type *Ty = I.getType();
  const unsigned BitWidth = Ty->getScalarSizeInBits();
  const bool HasNSW = I.hasNoSignedWrap();
  const bool HasNUW = I.hasNoUnsignedWrap();

  // X * -1 --> 0 - X
  if (match(Op1, m_AllOnes())) {
    return HasNSW ? BinaryOperator::CreateNSWNeg(Op0)
                  : BinaryOperator::CreateNeg(Op0);
  }

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
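      // e.g. (X << 3) * 5 == X * (5 << 3) == X * 40.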
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (HasNUW && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (HasNSW && Mul->hasNoSignedWrap() && Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
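      // e.g. X * 8 --> X << 3.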
      if (Constant *NewCst = ConstantExpr::getExactLogBase2(C1)) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (HasNUW)
          Shl->setHasNoUnsignedWrap();
        if (HasNSW) {
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) {
    // Interpret  X * (-1<<C)  as  (-X) * (1<<C)  and try to sink the negation.
    // The "* (1<<C)" thus becomes a potential shifting opportunity.
    if (Value *NegOp0 =
            Negator::Negate(/*IsNegation*/ true, HasNSW, Op0, *this)) {
      auto *Op1C = cast<Constant>(Op1);
      return replaceInstUsesWith(
          I, Builder.CreateMul(NegOp0, ConstantExpr::getNeg(Op1C), "",
                               /* HasNUW */ false,
                               HasNSW && Op1C->isNotMinSignedValue()));
    }

    // Try to convert multiply of extended operand to narrow negate and shift
    // for better analysis.
    // This is valid if the shift amount (trailing zeros in the multiplier
    // constant) clears more high bits than the bitwidth difference between
    // source and destination types:
    // ({z/s}ext X) * (-1<<C) --> (zext (-X)) << C
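    // e.g. for i8 -> i32 (SrcWidth 8, BitWidth 32), a multiplier of
    // -16777216 (-1 << 24) gives ShiftAmt 24 >= 32 - 8, so the fold applies.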
    const APInt *NegPow2C;
    Value *X;
    if (match(Op0, m_ZExtOrSExt(m_Value(X))) &&
        match(Op1, m_APIntAllowPoison(NegPow2C))) {
      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
      unsigned ShiftAmt = NegPow2C->countr_zero();
      if (ShiftAmt >= BitWidth - SrcWidth) {
        Value *N = Builder.CreateNeg(X, X->getName() + ".neg");
        Value *Z = Builder.CreateZExt(N, Ty, N->getName() + ".z");
        return BinaryOperator::CreateShl(Z, ConstantInt::get(Ty, ShiftAmt));
      }
    }
  }

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  // Simplify mul instructions with a constant RHS.
  Constant *MulC;
  if (match(Op1, m_ImmConstant(MulC))) {
    // Canonicalize (X+C1)*MulC -> X*MulC+C1*MulC.
    // Canonicalize (X|C1)*MulC -> X*MulC+C1*MulC.
    Value *X;
    Constant *C1;
    if (match(Op0, m_OneUse(m_AddLike(m_Value(X), m_ImmConstant(C1))))) {
      // C1*MulC simplifies to a tidier constant.
      Value *NewC = Builder.CreateMul(C1, MulC);
      auto *BOp0 = cast<BinaryOperator>(Op0);
      bool Op0NUW =
          (BOp0->getOpcode() == Instruction::Or || BOp0->hasNoUnsignedWrap());
      Value *NewMul = Builder.CreateMul(X, MulC);
      auto *BO = BinaryOperator::CreateAdd(NewMul, NewC);
      if (HasNUW && Op0NUW) {
        // If NewMul folded to a constant, there is no BinaryOperator to
        // update, but BO can still be marked nuw.
        if (auto *NewMulBO = dyn_cast<BinaryOperator>(NewMul))
          NewMulBO->setHasNoUnsignedWrap();
        BO->setHasNoUnsignedWrap();
      }
      return BO;
    }
  }

  // abs(X) * abs(X) -> X * X
  Value *X;
  if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
    return BinaryOperator::CreateMul(X, X);

  {
    Value *Y;
    // abs(X) * abs(Y) -> abs(X * Y)
    if (I.hasNoSignedWrap() &&
        match(Op0,
              m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(X), m_One()))) &&
        match(Op1, m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(Y), m_One()))))
      return replaceInstUsesWith(
          I, Builder.CreateBinaryIntrinsic(Intrinsic::abs,
                                           Builder.CreateNSWMul(X, Y),
                                           Builder.getTrue()));
  }

  // -X * C --> X * -C
  Value *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (HasNSW && cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // -X * Y --> -(X * Y)
  // X * -Y --> -(X * Y)
  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));

  // (-X * Y) * -X --> (X * Y) * X
  // (-X << Y) * -X --> (X << Y) * X
  if (match(Op1, m_Neg(m_Value(X)))) {
    if (Value *NegOp0 = Negator::Negate(false, /*IsNSW*/ false, Op0, *this))
      return BinaryOperator::CreateMul(NegOp0, X);
  }

  // (X / Y) * Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
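  // These follow from the division identity X == (X / Y) * Y + (X % Y), which
  // holds for udiv/urem and sdiv/srem alike (when Y is non-zero).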
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      // X must be frozen because we are increasing its number of uses.
      Value *XFreeze = Builder.CreateFreeze(X, X->getName() + ".fr");
      Value *Rem = Builder.CreateBinOp(RemOpc, XFreeze, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(XFreeze, Rem);
      return BinaryOperator::CreateSub(Rem, XFreeze);
    }
  }

  // Fold the following two scenarios:
  //   1) i1 mul -> i1 and.
  //   2) X * Y --> X & Y, iff X, Y can be only {0,1}.
  // Note: We could use known bits to generalize this and related patterns with
  // shifts/truncs.
  if (Ty->isIntOrIntVectorTy(1) ||
      (match(Op0, m_And(m_Value(), m_One())) &&
       match(Op1, m_And(m_Value(), m_One()))))
    return BinaryOperator::CreateAnd(Op0, Op1);

  if (Value *R = foldMulShl1(I, /* CommuteOperands */ false, Builder))
    return replaceInstUsesWith(I, R);
  if (Value *R = foldMulShl1(I, /* CommuteOperands */ true, Builder))
    return replaceInstUsesWith(I, R);

  // (zext bool X) * (zext bool Y) --> zext (and X, Y)
  // (sext bool X) * (sext bool Y) --> zext (and X, Y)
  // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
  if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse() || X == Y)) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::ZExt, And, Ty);
  }
  // (sext bool X) * (zext bool Y) --> sext (and X, Y)
  // (zext bool X) * (sext bool Y) --> sext (and X, Y)
  // Note: -1 * 1 == 1 * -1 == -1
  if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::SExt, And, Ty);
  }

  // (zext bool X) * Y --> X ? Y : 0
  // Y * (zext bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::getNullValue(Ty));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::getNullValue(Ty));

  // mul (sext X), Y -> select X, -Y, 0
  // mul Y, (sext X) -> select X, -Y, 0
  if (match(&I, m_c_Mul(m_OneUse(m_SExt(m_Value(X))), m_Value(Y))) &&
      X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Builder.CreateNeg(Y, "", I.hasNoSignedWrap()),
                              ConstantInt::getNullValue(Op0->getType()));

  Constant *ImmC;
  if (match(Op1, m_ImmConstant(ImmC))) {
    // (sext bool X) * C --> X ? -C : 0
    if (match(Op0, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
      Constant *NegC = ConstantExpr::getNeg(ImmC);
      return SelectInst::Create(X, NegC, ConstantInt::getNullValue(Ty));
    }

    // (ashr i32 X, 31) * C --> (X < 0) ? -C : 0
    const APInt *C;
    if (match(Op0, m_OneUse(m_AShr(m_Value(X), m_APInt(C)))) &&
        *C == C->getBitWidth() - 1) {
      Constant *NegC = ConstantExpr::getNeg(ImmC);
      Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
      return SelectInst::Create(IsNeg, NegC, ConstantInt::getNullValue(Ty));
    }
  }

  // (lshr X, 31) * Y --> (X < 0) ? Y : 0
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  const APInt *C;
  if (match(&I, m_c_BinOp(m_LShr(m_Value(X), m_APInt(C)), m_Value(Y))) &&
      *C == C->getBitWidth() - 1) {
    Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
    return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
  }

  // (and X, 1) * Y --> (trunc X) ? Y : 0
  if (match(&I, m_c_BinOp(m_OneUse(m_And(m_Value(X), m_One())), m_Value(Y)))) {
    Value *Tr = Builder.CreateTrunc(X, CmpInst::makeCmpResultType(Ty));
    return SelectInst::Create(Tr, Y, ConstantInt::getNullValue(Ty));
  }

  // ((ashr X, 31) | 1) * X --> abs(X)
  // X * ((ashr X, 31) | 1) --> abs(X)
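  // (ashr X, BitWidth - 1) is -1 when X is negative and 0 otherwise, so the
  // or'd value is the sign of X (-1 or 1), and multiplying X by its own sign
  // yields |X|.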
  if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
                                      m_SpecificIntAllowPoison(BitWidth - 1)),
                               m_One()),
                          m_Deferred(X)))) {
    Value *Abs = Builder.CreateBinaryIntrinsic(
        Intrinsic::abs, X, ConstantInt::getBool(I.getContext(), HasNSW));
    Abs->takeName(&I);
    return replaceInstUsesWith(I, Abs);
  }

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
    return Res;

  // (mul Op0 Op1):
  //    if Log2(Op0) folds away ->
  //        (shl Op1, Log2(Op0))
  //    if Log2(Op1) folds away ->
  //        (shl Op0, Log2(Op1))
  if (takeLog2(Builder, Op0, /*Depth*/ 0, /*AssumeNonZero*/ false,
               /*DoFold*/ false)) {
    Value *Res = takeLog2(Builder, Op0, /*Depth*/ 0, /*AssumeNonZero*/ false,
                          /*DoFold*/ true);
    BinaryOperator *Shl = BinaryOperator::CreateShl(Op1, Res);
    // We can only propagate the nuw flag.
    Shl->setHasNoUnsignedWrap(HasNUW);
    return Shl;
  }
  if (takeLog2(Builder, Op1, /*Depth*/ 0, /*AssumeNonZero*/ false,
               /*DoFold*/ false)) {
    Value *Res = takeLog2(Builder, Op1, /*Depth*/ 0, /*AssumeNonZero*/ false,
                          /*DoFold*/ true);
    BinaryOperator *Shl = BinaryOperator::CreateShl(Op0, Res);
    // We can only propagate the nuw flag.
    Shl->setHasNoUnsignedWrap(HasNUW);
    return Shl;
  }

  bool Changed = false;
  if (!HasNSW && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!HasNUW && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
  BinaryOperator::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
         "Expected fmul or fdiv");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y;

  // -X * -Y --> X * Y
  // -X / -Y --> X / Y
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);

  // fabs(X) * fabs(X) -> X * X
  // fabs(X) / fabs(X) -> X / X
  if (Op0 == Op1 && match(Op0, m_FAbs(m_Value(X))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);

  // fabs(X) * fabs(Y) --> fabs(X * Y)
  // fabs(X) / fabs(Y) --> fabs(X / Y)
  if (match(Op0, m_FAbs(m_Value(X))) && match(Op1, m_FAbs(m_Value(Y))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    Value *XY = Builder.CreateBinOp(Opcode, X, Y);
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
    Fabs->takeName(&I);
    return replaceInstUsesWith(I, Fabs);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::foldPowiReassoc(BinaryOperator &I) {
  auto createPowiExpr = [](BinaryOperator &I, InstCombinerImpl &IC, Value *X,
                           Value *Y, Value *Z) {
    InstCombiner::BuilderTy &Builder = IC.Builder;
    Value *YZ = Builder.CreateAdd(Y, Z);
    Instruction *NewPow = Builder.CreateIntrinsic(
        Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);

    return NewPow;
  };

  Value *X, *Y, *Z;
  unsigned Opcode = I.getOpcode();
  assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
         "Unexpected opcode");

  // powi(X, Y) * X --> powi(X, Y+1)
  // X * powi(X, Y) --> powi(X, Y+1)
  if (match(&I, m_c_FMul(m_OneUse(m_AllowReassoc(m_Intrinsic<Intrinsic::powi>(
                             m_Value(X), m_Value(Y)))),
                         m_Deferred(X)))) {
    Constant *One = ConstantInt::get(Y->getType(), 1);
    if (willNotOverflowSignedAdd(Y, One, I)) {
      Instruction *NewPow = createPowiExpr(I, *this, X, Y, One);
      return replaceInstUsesWith(I, NewPow);
    }
  }

  // powi(x, y) * powi(x, z) -> powi(x, y + z)
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  if (Opcode == Instruction::FMul && I.isOnlyUserOfAnyOperand() &&
      match(Op0, m_AllowReassoc(
                     m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_AllowReassoc(m_Intrinsic<Intrinsic::powi>(m_Specific(X),
                                                             m_Value(Z)))) &&
      Y->getType() == Z->getType()) {
    Instruction *NewPow = createPowiExpr(I, *this, X, Y, Z);
    return replaceInstUsesWith(I, NewPow);
  }

  if (Opcode == Instruction::FDiv && I.hasAllowReassoc() && I.hasNoNaNs()) {
    // powi(X, Y) / X --> powi(X, Y-1)
    // This is legal when (Y - 1) can't wrap around; the reassoc and nnan
    // flags are also required.
    // TODO: Multi-use may also be better off creating powi(x, y-1).
    if (match(Op0, m_OneUse(m_AllowReassoc(m_Intrinsic<Intrinsic::powi>(
                       m_Specific(Op1), m_Value(Y))))) &&
        willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
      Constant *NegOne = ConstantInt::getAllOnesValue(Y->getType());
      Instruction *NewPow = createPowiExpr(I, *this, Op1, Y, NegOne);
      return replaceInstUsesWith(I, NewPow);
    }

    // powi(X, Y) / (X * Z) --> powi(X, Y-1) / Z
    // This is legal when (Y - 1) can't wrap around; the reassoc and nnan
    // flags are also required.
    // TODO: Multi-use may also be better off creating powi(x, y-1).
    if (match(Op0, m_OneUse(m_AllowReassoc(m_Intrinsic<Intrinsic::powi>(
                       m_Value(X), m_Value(Y))))) &&
        match(Op1, m_AllowReassoc(m_c_FMul(m_Specific(X), m_Value(Z)))) &&
        willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
      Constant *NegOne = ConstantInt::getAllOnesValue(Y->getType());
      auto *NewPow = createPowiExpr(I, *this, X, Y, NegOne);
      return BinaryOperator::CreateFDivFMF(NewPow, Z, &I);
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::foldFMulReassoc(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *X, *Y;
  Constant *C;
  BinaryOperator *Op0BinOp;

  // Reassociate constant RHS with another constant to form constant
  // expression.
  if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP() &&
      match(Op0, m_AllowReassoc(m_BinOp(Op0BinOp)))) {
    // Everything in this scope folds I with Op0, intersecting their FMF.
    FastMathFlags FMF = I.getFastMathFlags() & Op0BinOp->getFastMathFlags();
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(FMF);
    Constant *C1;
    if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
      // (C1 / X) * C --> (C * C1) / X
      Constant *CC1 =
          ConstantFoldBinaryOpOperands(Instruction::FMul, C, C1, DL);
      if (CC1 && CC1->isNormalFP())
        return BinaryOperator::CreateFDivFMF(CC1, X, FMF);
    }
    if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
      // FIXME: This seems like it should also be checking for arcp
      // (X / C1) * C --> X * (C / C1)
      Constant *CDivC1 =
          ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C1, DL);
      if (CDivC1 && CDivC1->isNormalFP())
        return BinaryOperator::CreateFMulFMF(X, CDivC1, FMF);

      // If the constant was a denormal, try reassociating differently.
      // (X / C1) * C --> X / (C1 / C)
      Constant *C1DivC =
          ConstantFoldBinaryOpOperands(Instruction::FDiv, C1, C, DL);
      if (C1DivC && Op0->hasOneUse() && C1DivC->isNormalFP())
        return BinaryOperator::CreateFDivFMF(X, C1DivC, FMF);
    }

    // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
    // canonicalized to 'fadd X, C'. Distributing the multiply may allow
    // further folds and (X * C) + C2 is 'fma'.
    if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
      // (X + C1) * C --> (X * C) + (C * C1)
      if (Constant *CC1 =
              ConstantFoldBinaryOpOperands(Instruction::FMul, C, C1, DL)) {
        Value *XC = Builder.CreateFMul(X, C);
        return BinaryOperator::CreateFAddFMF(XC, CC1, FMF);
      }
    }
    if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
      // (C1 - X) * C --> (C * C1) - (X * C)
      if (Constant *CC1 =
              ConstantFoldBinaryOpOperands(Instruction::FMul, C, C1, DL)) {
        Value *XC = Builder.CreateFMul(X, C);
        return BinaryOperator::CreateFSubFMF(CC1, XC, FMF);
      }
    }
  }

  Value *Z;
  if (match(&I,
            m_c_FMul(m_AllowReassoc(m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))),
                     m_Value(Z)))) {
    BinaryOperator *DivOp = cast<BinaryOperator>(((Z == Op0) ? Op1 : Op0));
    FastMathFlags FMF = I.getFastMathFlags() & DivOp->getFastMathFlags();
    if (FMF.allowReassoc()) {
      // Sink division: (X / Y) * Z --> (X * Z) / Y
      IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
      Builder.setFastMathFlags(FMF);
      auto *NewFMul = Builder.CreateFMul(X, Z);
      return BinaryOperator::CreateFDivFMF(NewFMul, Y, FMF);
    }
  }

  // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
  // nnan disallows the possibility of returning a number if both operands are
  // negative (in that case, we should return NaN).
  if (I.hasNoNaNs() && match(Op0, m_OneUse(m_Sqrt(m_Value(X)))) &&
      match(Op1, m_OneUse(m_Sqrt(m_Value(Y))))) {
    Value *XY = Builder.CreateFMulFMF(X, Y, &I);
    Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
    return replaceInstUsesWith(I, Sqrt);
  }

  // The following transforms are done irrespective of the number of uses
  // for the expression "1.0/sqrt(X)".
  //  1) 1.0/sqrt(X) * X -> X/sqrt(X)
  //  2) X * 1.0/sqrt(X) -> X/sqrt(X)
  // We always expect the backend to reduce X/sqrt(X) to sqrt(X), if it
  // has the necessary (reassoc) fast-math-flags.
  if (I.hasNoSignedZeros() &&
      match(Op0, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
      match(Y, m_Sqrt(m_Value(X))) && Op1 == X)
    return BinaryOperator::CreateFDivFMF(X, Y, &I);
  if (I.hasNoSignedZeros() &&
      match(Op1, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
      match(Y, m_Sqrt(m_Value(X))) && Op0 == X)
    return BinaryOperator::CreateFDivFMF(X, Y, &I);

  // Like the similar transform in instsimplify, this requires 'nsz' because
  // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
  if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 && Op0->hasNUses(2)) {
    // Peek through fdiv to find squaring of square root:
    // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
    if (match(Op0, m_FDiv(m_Value(X), m_Sqrt(m_Value(Y))))) {
      Value *XX = Builder.CreateFMulFMF(X, X, &I);
      return BinaryOperator::CreateFDivFMF(XX, Y, &I);
    }
    // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
    if (match(Op0, m_FDiv(m_Sqrt(m_Value(Y)), m_Value(X)))) {
      Value *XX = Builder.CreateFMulFMF(X, X, &I);
      return BinaryOperator::CreateFDivFMF(Y, XX, &I);
    }
  }

  // pow(X, Y) * X --> pow(X, Y+1)
  // X * pow(X, Y) --> pow(X, Y+1)
  if (match(&I, m_c_FMul(m_OneUse(m_Intrinsic<Intrinsic::pow>(m_Value(X),
                                                              m_Value(Y))),
                         m_Deferred(X)))) {
    Value *Y1 = Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), 1.0), &I);
    Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, Y1, &I);
    return replaceInstUsesWith(I, Pow);
  }

  if (Instruction *FoldedPowi = foldPowiReassoc(I))
    return FoldedPowi;

  if (I.isOnlyUserOfAnyOperand()) {
    // pow(X, Y) * pow(X, Z) -> pow(X, Y + Z)
    if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
        match(Op1, m_Intrinsic<Intrinsic::pow>(m_Specific(X), m_Value(Z)))) {
      auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
      auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
      return replaceInstUsesWith(I, NewPow);
    }
    // pow(X, Y) * pow(Z, Y) -> pow(X * Z, Y)
    if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
        match(Op1, m_Intrinsic<Intrinsic::pow>(m_Value(Z), m_Specific(Y)))) {
      auto *XZ = Builder.CreateFMulFMF(X, Z, &I);
      auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, XZ, Y, &I);
      return replaceInstUsesWith(I, NewPow);
    }

    // exp(X) * exp(Y) -> exp(X + Y)
    if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
      Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
      return replaceInstUsesWith(I, Exp);
    }

    // exp2(X) * exp2(Y) -> exp2(X + Y)
    if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y)))) {
      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
      Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
      return replaceInstUsesWith(I, Exp2);
    }
  }

  // (X*Y) * X => (X*X) * Y where Y != X
  // The purpose is two-fold:
  //   1) to form a power expression (of X).
  //   2) potentially shorten the critical path: After transformation, the
  //      latency of the instruction Y is amortized by the expression of X*X,
  //      and therefore Y is in a "less critical" position compared to what it
  //      was before the transformation.
  if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) && Op1 != Y) {
    Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
    return BinaryOperator::CreateFMulFMF(XX, Y, &I);
  }
  if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) && Op0 != Y) {
    Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
    return BinaryOperator::CreateFMulFMF(XX, Y, &I);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
  if (Value *V = simplifyFMulInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  if (Instruction *R = foldFBinOpOfIntCasts(I))
    return R;

  // X * -1.0 --> -X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_SpecificFP(-1.0)))
    return UnaryOperator::CreateFNegFMF(Op0, &I);

  // With no-nans/no-infs:
  // X * 0.0 --> copysign(0.0, X)
  // X * -0.0 --> copysign(0.0, -X)
  const APFloat *FPC;
  if (match(Op1, m_APFloatAllowPoison(FPC)) && FPC->isZero() &&
      ((I.hasNoInfs() &&
        isKnownNeverNaN(Op0, /*Depth=*/0, SQ.getWithInstruction(&I))) ||
       isKnownNeverNaN(&I, /*Depth=*/0, SQ.getWithInstruction(&I)))) {
    if (FPC->isNegative())
      Op0 = Builder.CreateFNegFMF(Op0, &I);
    CallInst *CopySign = Builder.CreateIntrinsic(Intrinsic::copysign,
                                                 {I.getType()}, {Op1, Op0}, &I);
    return replaceInstUsesWith(I, CopySign);
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *C;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFMulFMF(X, NegC, &I);

  // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc())
    if (Instruction *FoldedMul = foldFMulReassoc(I))
      return FoldedMul;

  // log2(X * 0.5) * Y = log2(X) * Y - Y
  if (I.isFast()) {
    IntrinsicInst *Log2 = nullptr;
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
                       m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op0);
      Y = Op1;
    }
    if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
                       m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op1);
      Y = Op0;
    }
    if (Log2) {
      Value *Log2 = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
  }

  // Simplify FMUL recurrences starting with 0.0 to 0.0 if nnan and nsz are set.
  // Given a phi node with an entry value of 0 that is used in an fmul
  // operation, we can safely replace the fmul with 0 and eliminate the loop
  // computation.
  PHINode *PN = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  if (matchSimpleRecurrence(&I, PN, Start, Step) && I.hasNoNaNs() &&
      I.hasNoSignedZeros() && match(Start, m_Zero()))
    return replaceInstUsesWith(I, Start);

  // minimum(X, Y) * maximum(X, Y) => X * Y.
  if (match(&I,
            m_c_FMul(m_Intrinsic<Intrinsic::maximum>(m_Value(X), m_Value(Y)),
                     m_c_Intrinsic<Intrinsic::minimum>(m_Deferred(X),
                                                       m_Deferred(Y))))) {
    BinaryOperator *Result = BinaryOperator::CreateFMulFMF(X, Y, &I);
    // We cannot preserve ninf if nnan flag is not set.
    // If X is NaN and Y is Inf then in original program we had NaN * NaN,
    // while in optimized version NaN * Inf and this is a poison with ninf flag.
    if (!Result->hasNoNaNs())
      Result->setHasNoInfs(false);
    return Result;
  }

  return nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of the
/// select operands is zero. In that case, we can use the other select operand
/// because div/rem by zero is undefined.
bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  replaceOperand(I, 1, SI->getOperand(NonNullOperand));

  // Okay, we know we replace the operand of the div/rem with 'Y' with no
  // problem. However, the select, or the condition of the select may have
  // multiple uses. Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition each have only a single use, don't bother with
  // this; exit early.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we find an instruction that we can't assume will return, then
    // information from below it cannot be propagated above it.
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Use &Op : BBI->operands()) {
      if (Op == SI) {
        replaceUse(Op, SI->getOperand(NonNullOperand));
        Worklist.push(&*BBI);
      } else if (Op == SelectCond) {
        replaceUse(Op, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                           : ConstantInt::getFalse(CondTy));
        Worklist.push(&*BBI);
      }
    }

    // Once we're past the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;
  }
  return true;
}

/// True if the multiply cannot be expressed in an int this size.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
                              bool IsSigned) {
  bool Overflow;
  Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
  return Overflow;
}

/// True if C1 is a multiple of C2. Quotient contains C1/C2.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");

  // Bail if we will divide by zero.
  if (C2.isZero())
    return false;

  // Bail if we would divide INT_MIN by -1.
  if (IsSigned && C1.isMinSignedValue() && C2.isAllOnes())
    return false;

  APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}

static Value *foldIDivShl(BinaryOperator &I, InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::UDiv) &&
         "Expected integer divide");

  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();

  Value *X, *Y, *Z;

  // With appropriate no-wrap constraints, remove a common factor in the
  // dividend and divisor that is disguised as a left-shifted value.
  if (match(Op1, m_Shl(m_Value(X), m_Value(Z))) &&
      match(Op0, m_c_Mul(m_Specific(X), m_Value(Y)))) {
    // Both operands must have the matching no-wrap for this kind of division.
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    auto *Shl = cast<OverflowingBinaryOperator>(Op1);
    bool HasNUW = Mul->hasNoUnsignedWrap() && Shl->hasNoUnsignedWrap();
    bool HasNSW = Mul->hasNoSignedWrap() && Shl->hasNoSignedWrap();

    // (X * Y) u/ (X << Z) --> Y u>> Z
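    // e.g. with nuw on both the mul and the shl:
    //   (X * Y) u/ (X << 3) --> Y u>> 3
    // because the common factor X cancels when neither operation wraps.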
    if (!IsSigned && HasNUW)
      return Builder.CreateLShr(Y, Z, "", I.isExact());

    // (X * Y) s/ (X << Z) --> Y s/ (1 << Z)
    if (IsSigned && HasNSW && (Op0->hasOneUse() || Op1->hasOneUse())) {
      Value *Shl = Builder.CreateShl(ConstantInt::get(Ty, 1), Z);
      return Builder.CreateSDiv(Y, Shl, "", I.isExact());
    }
  }

  // With appropriate no-wrap constraints, remove a common factor in the
  // dividend and divisor that is disguised as a left-shift amount.
  if (match(Op0, m_Shl(m_Value(X), m_Value(Z))) &&
      match(Op1, m_Shl(m_Value(Y), m_Specific(Z)))) {
    auto *Shl0 = cast<OverflowingBinaryOperator>(Op0);
    auto *Shl1 = cast<OverflowingBinaryOperator>(Op1);

    // For unsigned div, we need 'nuw' on both shifts or
    // 'nsw' on both shifts + 'nuw' on the dividend.
    // (X << Z) / (Y << Z) --> X / Y
    if (!IsSigned &&
        ((Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap()) ||
         (Shl0->hasNoUnsignedWrap() && Shl0->hasNoSignedWrap() &&
          Shl1->hasNoSignedWrap())))
      return Builder.CreateUDiv(X, Y, "", I.isExact());

    // For signed div, we need 'nsw' on both shifts + 'nuw' on the divisor.
    // (X << Z) / (Y << Z) --> X / Y
    if (IsSigned && Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap() &&
        Shl1->hasNoUnsignedWrap())
      return Builder.CreateSDiv(X, Y, "", I.isExact());
  }

  // If X << Y and X << Z do not overflow, then:
  // (X << Y) / (X << Z) -> (1 << Y) / (1 << Z) -> 1 << Y >> Z
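  // e.g. (X << 5) / (X << 3) --> (1 << 5) >> 3 == 4, matching
  // (X * 32) / (X * 8) when neither shift overflows.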
  if (match(Op0, m_Shl(m_Value(X), m_Value(Y))) &&
      match(Op1, m_Shl(m_Specific(X), m_Value(Z)))) {
    auto *Shl0 = cast<OverflowingBinaryOperator>(Op0);
    auto *Shl1 = cast<OverflowingBinaryOperator>(Op1);

    if (IsSigned ? (Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap())
                 : (Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap())) {
      Constant *One = ConstantInt::get(X->getType(), 1);
      // Only preserve the nsw flag if dividend has nsw
      // or divisor has nsw and operator is sdiv.
      Value *Dividend = Builder.CreateShl(
          One, Y, "shl.dividend",
          /*HasNUW*/ true,
          /*HasNSW*/
          IsSigned ? (Shl0->hasNoUnsignedWrap() || Shl1->hasNoUnsignedWrap())
                   : Shl0->hasNoSignedWrap());
      return Builder.CreateLShr(Dividend, Z, "", I.isExact());
    }
  }

  return nullptr;
}

/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// Common integer divide transforms
Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  // If the divisor is a select-of-constants, try to constant fold all div ops:
  // C / (select Cond, TrueC, FalseC) --> select Cond, (C / TrueC), (C / FalseC)
  // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
  if (match(Op0, m_ImmConstant()) &&
      match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
    if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
                                          /*FoldWithMultiUse*/ true))
      return R;
  }

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2 -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    APInt Quotient(C2->getBitWidth(), /*val=*/0ULL, IsSigned);
    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
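      // e.g. with nsw: (X * 3) / 12 --> X / 4, since 12 is a multiple of 3.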
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
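      // e.g. with nsw: (X * 12) / 3 --> X * 4, since 12 is a multiple of 3.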
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         C1->ult(C1->getBitWidth() - 1)) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))) &&
         C1->ult(C1->getBitWidth()))) {
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getZExtValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    // Distribute div over add to eliminate a matching div/mul pair:
    // ((X * C2) + C1) / C2 --> X + C1/C2
    // We need a multiple of the divisor for a signed add constant, but
    // unsigned is fine with any constant pair.
    if (IsSigned &&
        match(Op0, m_NSWAddLike(m_NSWMul(m_Value(X), m_SpecificInt(*C2)),
                                m_APInt(C1))) &&
        isMultiple(*C1, *C2, Quotient, IsSigned)) {
      return BinaryOperator::CreateNSWAdd(X, ConstantInt::get(Ty, Quotient));
    }
    if (!IsSigned &&
        match(Op0, m_NUWAddLike(m_NUWMul(m_Value(X), m_SpecificInt(*C2)),
                                m_APInt(C1)))) {
      return BinaryOperator::CreateNUWAdd(X,
                                          ConstantInt::get(Ty, C1->udiv(*C2)));
    }

    if (!C2->isZero()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // 1 / 0 --> undef ; 1 / 1 --> 1 ; 1 / -1 --> -1 ; 1 / anything else --> 0
      // (Op1 + 1) u< 3 ? Op1 : 0
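      // The unsigned test (Op1 + 1) u< 3 holds exactly when Op1 is -1, 0, or
      // 1; the select then returns Op1, which is the correct quotient for 1
      // and -1 (and an acceptable value for the undefined 1 / 0 case), and 0
      // for everything else.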
      // Op1 must be frozen because we are increasing its number of uses.
      Value *F1 = Builder.CreateFreeze(Op1, Op1->getName() + ".fr");
      Value *Inc = Builder.CreateAdd(F1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, F1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      replaceOperand(I, 0, ConstantInt::get(Ty, 1));
      replaceOperand(I, 1, Y);
      return &I;
    }
  }

  // (X << Z) / (X * Y) -> (1 << Z) / Y
  // TODO: Handle sdiv.
  if (!IsSigned && Op1->hasOneUse() &&
      match(Op0, m_NUWShl(m_Value(X), m_Value(Z))) &&
      match(Op1, m_c_Mul(m_Specific(X), m_Value(Y))))
    if (cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap()) {
      Instruction *NewDiv = BinaryOperator::CreateUDiv(
          Builder.CreateShl(ConstantInt::get(Ty, 1), Z, "", /*NUW*/ true), Y);
      NewDiv->setIsExact(I.isExact());
      return NewDiv;
    }

  if (Value *R = foldIDivShl(I, Builder))
    return replaceInstUsesWith(I, R);

  // With the appropriate no-wrap constraint, remove a multiply by the divisor
  // after peeking through another divide:
  // ((Op1 * X) / Y) / Op1 --> X / Y
  if (match(Op0, m_BinOp(I.getOpcode(), m_c_Mul(m_Specific(Op1), m_Value(X)),
                         m_Value(Y)))) {
    auto *InnerDiv = cast<PossiblyExactOperator>(Op0);
    auto *Mul = cast<OverflowingBinaryOperator>(InnerDiv->getOperand(0));
    Instruction *NewDiv = nullptr;
    if (!IsSigned && Mul->hasNoUnsignedWrap())
      NewDiv = BinaryOperator::CreateUDiv(X, Y);
    else if (IsSigned && Mul->hasNoSignedWrap())
      NewDiv = BinaryOperator::CreateSDiv(X, Y);

    // Exact propagates only if both of the original divides are exact.
    if (NewDiv) {
      NewDiv->setIsExact(I.isExact() && InnerDiv->isExact());
      return NewDiv;
    }
  }

  // (X * Y) / (X * Z) --> Y / Z (and commuted variants)
  if (match(Op0, m_Mul(m_Value(X), m_Value(Y)))) {
    auto OB0HasNSW = cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap();
    auto OB0HasNUW = cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap();

    auto CreateDivOrNull = [&](Value *A, Value *B) -> Instruction * {
      auto OB1HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
      auto OB1HasNUW =
          cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
      const APInt *C1, *C2;
      if (IsSigned && OB0HasNSW) {
        if (OB1HasNSW && match(B, m_APInt(C1)) && !C1->isAllOnes())
          return BinaryOperator::CreateSDiv(A, B);
      }
      if (!IsSigned && OB0HasNUW) {
        if (OB1HasNUW)
          return BinaryOperator::CreateUDiv(A, B);
        if (match(A, m_APInt(C1)) && match(B, m_APInt(C2)) && C2->ule(*C1))
          return BinaryOperator::CreateUDiv(A, B);
      }
      return nullptr;
    };

    if (match(Op1, m_c_Mul(m_Specific(X), m_Value(Z)))) {
      if (auto *Val = CreateDivOrNull(Y, Z))
        return Val;
    }
    if (match(Op1, m_c_Mul(m_Specific(Y), m_Value(Z)))) {
      if (auto *Val = CreateDivOrNull(X, Z))
        return Val;
    }
  }
  return nullptr;
}
1344
1345static const unsigned MaxDepth = 6;
1346
1347// Take the exact integer log2 of the value. If DoFold is true, create the
1348// actual instructions, otherwise return a non-null dummy value. Return nullptr
1349// on failure.
1350static Value *takeLog2(IRBuilderBase &Builder, Value *Op, unsigned Depth,
1351 bool AssumeNonZero, bool DoFold) {
1352 auto IfFold = [DoFold](function_ref<Value *()> Fn) {
1353 if (!DoFold)
1354 return reinterpret_cast<Value *>(-1);
1355 return Fn();
1356 };
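  // The (-1) cast above is only a non-null sentinel for the DoFold == false
  // dry run; callers only test it for null-ness and never dereference it or
  // insert it into the IR.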

  // FIXME: assert that Op1 isn't/doesn't contain undef.

  // log2(2^C) -> C
  if (match(Op, m_Power2()))
    return IfFold([&]() {
      Constant *C = ConstantExpr::getExactLogBase2(cast<Constant>(Op));
      if (!C)
        llvm_unreachable("Failed to constant fold udiv -> logbase2");
      return C;
    });

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return nullptr;

  // log2(zext X) -> zext log2(X)
  // FIXME: Require one use?
  Value *X, *Y;
  if (match(Op, m_ZExt(m_Value(X))))
    if (Value *LogX = takeLog2(Builder, X, Depth, AssumeNonZero, DoFold))
      return IfFold([&]() { return Builder.CreateZExt(LogX, Op->getType()); });

  // log2(X << Y) -> log2(X) + Y
  // FIXME: Require one use unless X is 1?
  if (match(Op, m_Shl(m_Value(X), m_Value(Y)))) {
    auto *BO = cast<OverflowingBinaryOperator>(Op);
    // nuw will be set if the `shl` is trivially non-zero.
    if (AssumeNonZero || BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap())
      if (Value *LogX = takeLog2(Builder, X, Depth, AssumeNonZero, DoFold))
        return IfFold([&]() { return Builder.CreateAdd(LogX, Y); });
  }

  // log2(Cond ? X : Y) -> Cond ? log2(X) : log2(Y)
  // FIXME: Require one use?
  if (SelectInst *SI = dyn_cast<SelectInst>(Op))
    if (Value *LogX = takeLog2(Builder, SI->getOperand(1), Depth,
                               AssumeNonZero, DoFold))
      if (Value *LogY = takeLog2(Builder, SI->getOperand(2), Depth,
                                 AssumeNonZero, DoFold))
        return IfFold([&]() {
          return Builder.CreateSelect(SI->getOperand(0), LogX, LogY);
        });

  // log2(umin(X, Y)) -> umin(log2(X), log2(Y))
  // log2(umax(X, Y)) -> umax(log2(X), log2(Y))
  auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op);
  if (MinMax && MinMax->hasOneUse() && !MinMax->isSigned()) {
    // Pass AssumeNonZero as false here; otherwise we can hit a case where
    // log2(umax(X, Y)) != umax(log2(X), log2(Y)) because of overflow.
    if (Value *LogX = takeLog2(Builder, MinMax->getLHS(), Depth,
                               /*AssumeNonZero*/ false, DoFold))
      if (Value *LogY = takeLog2(Builder, MinMax->getRHS(), Depth,
                                 /*AssumeNonZero*/ false, DoFold))
        return IfFold([&]() {
          return Builder.CreateBinaryIntrinsic(MinMax->getIntrinsicID(), LogX,
                                               LogY);
        });
  }

  return nullptr;
}

/// If we have zero-extended operands of an unsigned div or rem, we may be able
/// to narrow the operation (sink the zext below the math).
static Instruction *narrowUDivURem(BinaryOperator &I, InstCombinerImpl &IC) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  Value *N = I.getOperand(0);
  Value *D = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
      X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
    // udiv (zext X), (zext Y) --> zext (udiv X, Y)
    // urem (zext X), (zext Y) --> zext (urem X, Y)
    Value *NarrowOp = IC.Builder.CreateBinOp(Opcode, X, Y);
    return new ZExtInst(NarrowOp, Ty);
  }
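  // For illustration (hypothetical types): if %x and %y are i8,
  //   %zx = zext i8 %x to i32
  //   %zy = zext i8 %y to i32
  //   %d  = udiv i32 %zx, %zy   -->   %n = udiv i8 %x, %y
  //                                   %d = zext i8 %n to i32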

  Constant *C;
  if (isa<Instruction>(N) && match(N, m_OneUse(m_ZExt(m_Value(X)))) &&
      match(D, m_Constant(C))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = IC.getLosslessUnsignedTrunc(C, X->getType());
    if (!TruncC)
      return nullptr;

    // udiv (zext X), C --> zext (udiv X, C')
    // urem (zext X), C --> zext (urem X, C')
    return new ZExtInst(IC.Builder.CreateBinOp(Opcode, X, TruncC), Ty);
  }
  if (isa<Instruction>(D) && match(D, m_OneUse(m_ZExt(m_Value(X)))) &&
      match(N, m_Constant(C))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = IC.getLosslessUnsignedTrunc(C, X->getType());
    if (!TruncC)
      return nullptr;

    // udiv C, (zext X) --> zext (udiv C', X)
    // urem C, (zext X) --> zext (urem C', X)
    return new ZExtInst(IC.Builder.CreateBinOp(Opcode, TruncC, X), Ty);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
  if (Value *V = simplifyUDivInst(I.getOperand(0), I.getOperand(1), I.isExact(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C1, *C2;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
    // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
    bool Overflow;
    APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
    if (!Overflow) {
      bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
      BinaryOperator *BO = BinaryOperator::CreateUDiv(
          X, ConstantInt::get(X->getType(), C2ShlC1));
      if (IsExact)
        BO->setIsExact();
      return BO;
    }
  }
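  // A worked instance of the fold above (constants chosen for illustration):
  // with C1 == 3 and C2 == 5,
  //   (X u>> 3) /u 5  -->  X /u 40
  // because shifting right by 3 is an unsigned divide by 8, nested floor
  // division composes multiplicatively, and 5 << 3 == 40 does not overflow.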

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
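  // The quotient here can only be 0 or 1 because a divisor with the sign bit
  // set exceeds half the unsigned range: e.g. for i8 (width assumed for
  // illustration), X /u 200 is 1 iff X u>= 200 and 0 otherwise.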
  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, *this))
    return NarrowDiv;

  Value *A, *B;

  // Look through a right-shift to find the common factor:
  // ((Op1 *nuw A) >> B) / Op1 --> A >> B
  if (match(Op0, m_LShr(m_NUWMul(m_Specific(Op1), m_Value(A)), m_Value(B))) ||
      match(Op0, m_LShr(m_NUWMul(m_Value(A), m_Specific(Op1)), m_Value(B)))) {
    Instruction *Lshr = BinaryOperator::CreateLShr(A, B);
    if (I.isExact() && cast<PossiblyExactOperator>(Op0)->isExact())
      Lshr->setIsExact();
    return Lshr;
  }

  // Op0 udiv Op1 -> Op0 lshr log2(Op1), if log2() folds away.
  if (takeLog2(Builder, Op1, /*Depth*/ 0, /*AssumeNonZero*/ true,
               /*DoFold*/ false)) {
    Value *Res = takeLog2(Builder, Op1, /*Depth*/ 0,
                          /*AssumeNonZero*/ true, /*DoFold*/ true);
    return replaceInstUsesWith(
        I, Builder.CreateLShr(Op0, Res, I.getName(), I.isExact()));
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
  if (Value *V = simplifySDivInst(I.getOperand(0), I.getOperand(1), I.isExact(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNSWNeg(Op0);

  // X / INT_MIN --> X == INT_MIN
  if (match(Op1, m_SignMask()))
    return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), Ty);
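  // The quotient of X /s INT_MIN is nonzero only when X is INT_MIN itself:
  // e.g. for i8 (width assumed for illustration), X /s -128 is 1 iff
  // X == -128 and 0 for every other X, so the zext'd compare suffices.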

  if (I.isExact()) {
    // sdiv exact X, 1<<C --> ashr exact X, C iff 1<<C is non-negative
    if (match(Op1, m_Power2()) && match(Op1, m_NonNegative())) {
      Constant *C = ConstantExpr::getExactLogBase2(cast<Constant>(Op1));
      return BinaryOperator::CreateExactAShr(Op0, C);
    }

    // sdiv exact X, (1<<ShAmt) --> ashr exact X, ShAmt (if shl is non-negative)
    Value *ShAmt;
    if (match(Op1, m_NSWShl(m_One(), m_Value(ShAmt))))
      return BinaryOperator::CreateExactAShr(Op0, ShAmt);

    // sdiv exact X, -1<<C --> -(ashr exact X, C)
    if (match(Op1, m_NegatedPower2())) {
      Constant *NegPow2C = ConstantExpr::getNeg(cast<Constant>(Op1));
      Constant *C = ConstantExpr::getExactLogBase2(NegPow2C);
      Value *Ashr = Builder.CreateAShr(Op0, C, I.getName() + ".neg",
                                       /*IsExact*/ true);
      return BinaryOperator::CreateNSWNeg(Ashr);
    }
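    // For illustration, with C == 3 (a divisor of -8, values assumed):
    //   %d = sdiv exact i32 %x, -8
    // becomes
    //   %x.neg = ashr exact i32 %x, 3
    //   %d = sub nsw i32 0, %x.neg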
  }

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // If the dividend is sign-extended and the constant divisor is small
    // enough to fit in the source type, shrink the division to the narrower
    // type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >=
            Op1C->getSignificantBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Ty);
    }

    // -X / C --> X / -C (if the negation doesn't overflow).
    // TODO: This could be enhanced to handle arbitrary vector constants by
    // checking if all elements are not the min-signed-val.
    if (!Op1C->isMinSignedValue() && match(Op0, m_NSWNeg(m_Value(X)))) {
      Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
      Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / Y --> -(X / Y)
  Value *Y;
  if (match(&I, m_SDiv(m_OneUse(m_NSWNeg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(
        Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));

  // abs(X) / X --> X > -1 ? 1 : -1
  // X / abs(X) --> X > -1 ? 1 : -1
  if (match(&I, m_c_BinOp(
                    m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(X), m_One())),
                    m_Deferred(X)))) {
    Value *Cond = Builder.CreateIsNotNeg(X);
    return SelectInst::Create(Cond, ConstantInt::get(Ty, 1),
                              ConstantInt::getAllOnesValue(Ty));
  }

  KnownBits KnownDividend = computeKnownBits(Op0, 0, &I);
  if (!I.isExact() &&
      (match(Op1, m_Power2(Op1C)) || match(Op1, m_NegatedPower2(Op1C))) &&
      KnownDividend.countMinTrailingZeros() >= Op1C->countr_zero()) {
    I.setIsExact();
    return &I;
  }

  if (KnownDividend.isNonNegative()) {
    // If both operands are unsigned, turn this into a udiv.
    if (isKnownNonNegative(Op1, SQ.getWithInstruction(&I))) {
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (match(Op1, m_NegatedPower2())) {
      // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
      // -> -(X udiv (1 << C)) -> -(X u>> C)
      Constant *CNegLog2 = ConstantExpr::getExactLogBase2(
          ConstantExpr::getNeg(cast<Constant>(Op1)));
      Value *Shr = Builder.CreateLShr(Op0, CNegLog2, I.getName(), I.isExact());
      return BinaryOperator::CreateNeg(Shr);
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / X --> X == INT_MIN ? 1 : -1
  if (isKnownNegation(Op0, Op1)) {
    APInt MinVal = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
    Value *Cond = Builder.CreateICmpEQ(Op0, ConstantInt::get(Ty, MinVal));
    return SelectInst::Create(Cond, ConstantInt::get(Ty, 1),
                              ConstantInt::getAllOnesValue(Ty));
  }
  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
Instruction *InstCombinerImpl::foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(X, NegC, &I);

  // nnan X / +0.0 -> copysign(inf, X)
  // nnan nsz X / -0.0 -> copysign(inf, X)
  if (I.hasNoNaNs() &&
      (match(I.getOperand(1), m_PosZeroFP()) ||
       (I.hasNoSignedZeros() && match(I.getOperand(1), m_AnyZeroFP())))) {
    IRBuilder<> B(&I);
    CallInst *CopySign = B.CreateIntrinsic(
        Intrinsic::copysign, {C->getType()},
        {ConstantFP::getInfinity(I.getType()), I.getOperand(0)}, &I);
    CopySign->takeName(&I);
    return replaceInstUsesWith(I, CopySign);
  }

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantFoldBinaryOpOperands(
      Instruction::FDiv, ConstantFP::get(I.getType(), 1.0), C, DL);
  if (!RecipC || !RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
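  // For example (illustrative values): X / 2.0 --> X * 0.5 unconditionally,
  // since 0.5 is an exact inverse, while X / 3.0 --> X * (1.0/3.0) only when
  // the arcp flag permits the inexact reciprocal.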
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}

/// Remove negation and try to reassociate constant math.
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(NegC, X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C2, DL);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantFoldBinaryOpOperands(Instruction::FMul, C, C2, DL);
  }
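  // E.g. (illustrative constants, under the reassoc/arcp checks above):
  //   10.0 / (X * 2.0) --> 5.0 / X
  //   10.0 / (X / 2.0) --> 20.0 / X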
  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}

/// Negate the exponent of pow/exp to fold division-by-pow() into multiply.
static Instruction *foldFDivPowDivisor(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  auto *II = dyn_cast<IntrinsicInst>(Op1);
  if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
      !I.hasAllowReciprocal())
    return nullptr;

  // Z / pow(X, Y) --> Z * pow(X, -Y)
  // Z / exp{2}(Y) --> Z * exp{2}(-Y)
  // In the general case, this creates an extra instruction, but fmul allows
  // for better canonicalization and optimization than fdiv.
  Intrinsic::ID IID = II->getIntrinsicID();
  SmallVector<Value *> Args;
  switch (IID) {
  case Intrinsic::pow:
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
    break;
  case Intrinsic::powi: {
    // Require 'ninf' assuming that makes powi(X, -INT_MIN) acceptable.
    // That is, X ** (huge negative number) is 0.0, ~1.0, or INF and so
    // dividing by that is INF, ~1.0, or 0.0. Code that uses powi allows
    // non-standard results, so this corner case should be acceptable if the
    // code rules out INF values.
    if (!I.hasNoInfs())
      return nullptr;
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
    Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
    Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
    return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
  }
  case Intrinsic::exp:
  case Intrinsic::exp2:
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
    break;
  default:
    return nullptr;
  }
  Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
  return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
}

/// Convert the division into a multiplication if the divisor is a sqrt
/// intrinsic whose operand is itself an fdiv instruction.
static Instruction *foldFDivSqrtDivisor(BinaryOperator &I,
                                        InstCombiner::BuilderTy &Builder) {
  // X / sqrt(Y / Z) --> X * sqrt(Z / Y)
  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  auto *II = dyn_cast<IntrinsicInst>(Op1);
  if (!II || II->getIntrinsicID() != Intrinsic::sqrt || !II->hasOneUse() ||
      !II->hasAllowReassoc() || !II->hasAllowReciprocal())
    return nullptr;

  Value *Y, *Z;
  auto *DivOp = dyn_cast<Instruction>(II->getOperand(0));
  if (!DivOp)
    return nullptr;
  if (!match(DivOp, m_FDiv(m_Value(Y), m_Value(Z))))
    return nullptr;
  if (!DivOp->hasAllowReassoc() || !I.hasAllowReciprocal() ||
      !DivOp->hasOneUse())
    return nullptr;
  Value *SwapDiv = Builder.CreateFDivFMF(Z, Y, DivOp);
  Value *NewSqrt =
      Builder.CreateUnaryIntrinsic(II->getIntrinsicID(), SwapDiv, II);
  return BinaryOperator::CreateFMulFMF(Op0, NewSqrt, &I);
}

Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
  Module *M = I.getModule();

  if (Value *V = simplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
    // Z / (1.0 / Y) => (Y * Z)
    //
    // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even when 1.0/Y has multiple uses,
    // the instruction count remains the same and a division is still
    // replaced by a multiplication.
    if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
      return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) && hasFloatFn(M, &TLI, I.getType(), LibFunc_tan,
                                       LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  Value *X, *Y;
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
    replaceOperand(I, 1, Y);
    return &I;
  }

  // X / fabs(X) -> copysign(1.0, X)
  // fabs(X) / X -> copysign(1.0, X)
  if (I.hasNoNaNs() && I.hasNoInfs() &&
      (match(&I, m_FDiv(m_Value(X), m_FAbs(m_Deferred(X)))) ||
       match(&I, m_FDiv(m_FAbs(m_Value(X)), m_Deferred(X))))) {
    Value *V = Builder.CreateBinaryIntrinsic(
        Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
    return replaceInstUsesWith(I, V);
  }

  if (Instruction *Mul = foldFDivPowDivisor(I, Builder))
    return Mul;

  if (Instruction *Mul = foldFDivSqrtDivisor(I, Builder))
    return Mul;

  // pow(X, Y) / X --> pow(X, Y-1)
  if (I.hasAllowReassoc() &&
      match(Op0, m_OneUse(m_Intrinsic<Intrinsic::pow>(m_Specific(Op1),
                                                      m_Value(Y))))) {
    Value *Y1 =
        Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), -1.0), &I);
    Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, Op1, Y1, &I);
    return replaceInstUsesWith(I, Pow);
  }

  if (Instruction *FoldedPowi = foldPowiReassoc(I))
    return FoldedPowi;

  return nullptr;
}

// Variety of transform for:
// (urem/srem (mul X, Y), (mul X, Z))
// (urem/srem (shl X, Y), (shl X, Z))
// (urem/srem (shl Y, X), (shl Z, X))
// NB: The shift cases are really just extensions of the mul case. We treat
// shift as Val * (1 << Amt).
static Instruction *simplifyIRemMulShl(BinaryOperator &I,
                                       InstCombinerImpl &IC) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *X = nullptr;
  APInt Y, Z;
  bool ShiftByX = false;

  // If V is not nullptr, it will be matched using m_Specific.
  auto MatchShiftOrMulXC = [](Value *Op, Value *&V, APInt &C) -> bool {
    const APInt *Tmp = nullptr;
    if ((!V && match(Op, m_Mul(m_Value(V), m_APInt(Tmp)))) ||
        (V && match(Op, m_Mul(m_Specific(V), m_APInt(Tmp)))))
      C = *Tmp;
    else if ((!V && match(Op, m_Shl(m_Value(V), m_APInt(Tmp)))) ||
             (V && match(Op, m_Shl(m_Specific(V), m_APInt(Tmp)))))
      C = APInt(Tmp->getBitWidth(), 1) << *Tmp;
    if (Tmp != nullptr)
      return true;

    // Reset `V` so we don't start with specific value on next match attempt.
    V = nullptr;
    return false;
  };

  auto MatchShiftCX = [](Value *Op, APInt &C, Value *&V) -> bool {
    const APInt *Tmp = nullptr;
    if ((!V && match(Op, m_Shl(m_APInt(Tmp), m_Value(V)))) ||
        (V && match(Op, m_Shl(m_APInt(Tmp), m_Specific(V))))) {
      C = *Tmp;
      return true;
    }

    // Reset `V` so we don't start with specific value on next match attempt.
    V = nullptr;
    return false;
  };

  if (MatchShiftOrMulXC(Op0, X, Y) && MatchShiftOrMulXC(Op1, X, Z)) {
    // pass
  } else if (MatchShiftCX(Op0, Y, X) && MatchShiftCX(Op1, Z, X)) {
    ShiftByX = true;
  } else {
    return nullptr;
  }

  bool IsSRem = I.getOpcode() == Instruction::SRem;

  OverflowingBinaryOperator *BO0 = cast<OverflowingBinaryOperator>(Op0);
  // TODO: We may be able to deduce more about nsw/nuw of BO0/BO1 based on Y >=
  // Z or Z >= Y.
  bool BO0HasNSW = BO0->hasNoSignedWrap();
  bool BO0HasNUW = BO0->hasNoUnsignedWrap();
  bool BO0NoWrap = IsSRem ? BO0HasNSW : BO0HasNUW;

  APInt RemYZ = IsSRem ? Y.srem(Z) : Y.urem(Z);
  // (rem (mul nuw/nsw X, Y), (mul X, Z))
  // if (rem Y, Z) == 0
  // -> 0
  if (RemYZ.isZero() && BO0NoWrap)
    return IC.replaceInstUsesWith(I, ConstantInt::getNullValue(I.getType()));
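  // E.g. (illustrative constants): (mul nuw i32 %x, 8) urem (mul i32 %x, 4)
  // is 0, because 8 urem 4 == 0 and the nuw on X*8 guarantees X*8 is exactly
  // twice X*4 with no wrapping.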

  // Helper function to emit either (RemSimplificationC << X) or
  // (RemSimplificationC * X) depending on whether we matched Op0/Op1 as
  // (shl V, X) or (mul V, X) respectively.
  auto CreateMulOrShift =
      [&](const APInt &RemSimplificationC) -> BinaryOperator * {
    Value *RemSimplification =
        ConstantInt::get(I.getType(), RemSimplificationC);
    return ShiftByX ? BinaryOperator::CreateShl(RemSimplification, X)
                    : BinaryOperator::CreateMul(X, RemSimplification);
  };

  OverflowingBinaryOperator *BO1 = cast<OverflowingBinaryOperator>(Op1);
  bool BO1HasNSW = BO1->hasNoSignedWrap();
  bool BO1HasNUW = BO1->hasNoUnsignedWrap();
  bool BO1NoWrap = IsSRem ? BO1HasNSW : BO1HasNUW;
  // (rem (mul X, Y), (mul nuw/nsw X, Z))
  // if (rem Y, Z) == Y
  // -> (mul nuw/nsw X, Y)
  if (RemYZ == Y && BO1NoWrap) {
    BinaryOperator *BO = CreateMulOrShift(Y);
    // Copy any overflow flags from Op0.
    BO->setHasNoSignedWrap(IsSRem || BO0HasNSW);
    BO->setHasNoUnsignedWrap(!IsSRem || BO0HasNUW);
    return BO;
  }
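  // E.g. (illustrative constants): (mul i32 %x, 3) urem (mul nuw i32 %x, 5)
  // --> mul i32 %x, 3, since 3 urem 5 == 3 and the nuw on X*5 guarantees
  // X*3 u< X*5, so the dividend is already the remainder.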

  // (rem (mul nuw/nsw X, Y), (mul {nsw} X, Z))
  // if Y >= Z
  // -> (mul {nuw} nsw X, (rem Y, Z))
  if (Y.uge(Z) && (IsSRem ? (BO0HasNSW && BO1HasNSW) : BO0HasNUW)) {
    BinaryOperator *BO = CreateMulOrShift(RemYZ);
    BO->setHasNoSignedWrap();
    BO->setHasNoUnsignedWrap(BO0HasNUW);
    return BO;
  }
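  // E.g. (illustrative constants): (mul nuw i32 %x, 7) urem (mul i32 %x, 3)
  // --> mul nuw nsw i32 %x, 1, since 7 urem 3 == 1; the multiply by 1 then
  // folds away, leaving just %x.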

  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those
/// integer remainder instructions.
Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  // If the divisor is a select-of-constants, try to constant fold all rem ops:
  // C % (select Cond, TrueC, FalseC) --> select Cond, (C % TrueC), (C % FalseC)
  // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
  if (match(Op0, m_ImmConstant()) &&
      match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
    if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
                                          /*FoldWithMultiUse*/ true))
      return R;
  }

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  if (Instruction *R = simplifyIRemMulShl(I, *this))
    return R;

  return nullptr;
}

Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
  if (Value *V = simplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, *this))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    // This may increase the instruction count; we don't require Y to be a
    // constant.
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }
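  // E.g. X urem 16 --> X & 15; for a variable power-of-two divisor such as
  // (shl nuw i32 1, %n), the emitted mask is the add above: (1 << %n) - 1.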

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One())) {
    Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  // Op0 urem C -> Op0 < C ? Op0 : Op0 - C, where C >= signbit.
  // Op0 must be frozen because we are increasing its number of uses.
  if (match(Op1, m_Negative())) {
    Value *F0 = Builder.CreateFreeze(Op0, Op0->getName() + ".fr");
    Value *Cmp = Builder.CreateICmpULT(F0, Op1);
    Value *Sub = Builder.CreateSub(F0, Op1);
    return SelectInst::Create(Cmp, F0, Sub);
  }
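  // The select is valid because a divisor with the sign bit set exceeds half
  // the unsigned range, so at most one subtraction is needed: e.g. for i8
  // (width assumed), X urem 200 is X when X u< 200 and X - 200 otherwise.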

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *FrozenOp0 = Builder.CreateFreeze(Op0, Op0->getName() + ".frozen");
    Value *Cmp =
        Builder.CreateICmpEQ(FrozenOp0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), FrozenOp0);
  }
  // (X + 1) urem Op1 --> (X + 1) == Op1 ? 0 : X + 1 (given X u< Op1).
  if (match(Op0, m_Add(m_Value(X), m_One()))) {
    Value *Val =
        simplifyICmpInst(ICmpInst::ICMP_ULT, X, Op1, SQ.getWithInstruction(&I));
    if (Val && match(Val, m_One())) {
      Value *FrozenOp0 = Builder.CreateFreeze(Op0, Op0->getName() + ".frozen");
      Value *Cmp = Builder.CreateICmpEQ(FrozenOp0, Op1);
      return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), FrozenOp0);
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
  if (Value *V = simplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
      return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
  }

  // -X srem Y --> -(X srem Y)
  Value *X, *Y;
  if (match(&I, m_SRem(m_OneUse(m_NSWNeg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) // Don't loop on -MININT
        return replaceOperand(I, 1, NewRHSV);
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
  if (Value *V = simplifyFRemInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  return nullptr;
}