LLVM 23.0.0git
InstructionCombining.cpp
Go to the documentation of this file.
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;
116
// Pass-level statistics (-stats). The first group counts worklist iterations
// per function; the second group counts individual transformations.
117STATISTIC(NumWorklistIterations,
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration, "Number of functions with one iteration");
120STATISTIC(NumTwoIterations, "Number of functions with two iterations");
121STATISTIC(NumThreeIterations, "Number of functions with three iterations");
122STATISTIC(NumFourOrMoreIterations,
123 "Number of functions with four or more iterations");
124
125STATISTIC(NumCombined , "Number of insts combined");
126STATISTIC(NumConstProp, "Number of constant folds");
127STATISTIC(NumDeadInst , "Number of dead inst eliminated");
128STATISTIC(NumSunkInst , "Number of instructions sunk");
129STATISTIC(NumExpand, "Number of expansions");
130STATISTIC(NumFactor , "Number of factorizations");
131STATISTIC(NumReassoc , "Number of reassociations");
// Debug counter allowing selective skipping of visits for bisection.
132DEBUG_COUNTER(VisitCounter, "instcombine-visit",
133 "Controls which instructions are visited");
134
// Command-line knobs for this pass.
135static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
136 cl::desc("Enable code sinking"),
137 cl::init(true));
138
// NOTE(review): the declarator lines for the next two options (doxygen source
// lines 139 and 143, e.g. `static cl::opt<unsigned> ...(`) were dropped by the
// HTML extraction; only their argument lists survive below.
140 "instcombine-max-sink-users", cl::init(32),
141 cl::desc("Maximum number of undroppable users for instruction sinking"));
142
144MaxArraySize("instcombine-maxarray-size", cl::init(1024),
145 cl::desc("Maximum array size considered when doing a combine"));
146
// NOTE(review): the extern declaration inside this namespace (doxygen line
// 148) was dropped by the extraction.
147namespace llvm {
149} // end namespace llvm
150
151// FIXME: Remove this flag when it is no longer necessary to convert
152// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
153// increases variable availability at the cost of accuracy. Variables that
154// cannot be promoted by mem2reg or SROA will be described as living in memory
155// for their entire lifetime. However, passes like DSE and instcombine can
156// delete stores to the alloca, leading to misleading and inaccurate debug
157// information. This flag can be removed when those passes are fixed.
158static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
159 cl::Hidden, cl::init(true));
160
// Delegates combining of target-specific intrinsics to the TTI hook; returns
// std::nullopt for generic intrinsics so the caller falls back to the generic
// combines.
// NOTE(review): the function declarator line (doxygen line 162) was dropped by
// the HTML extraction — confirm the exact name against upstream.
161std::optional<Instruction *>
163 // Handle target specific intrinsics
164 if (II.getCalledFunction()->isTargetIntrinsic()) {
165 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
166 }
167 return std::nullopt;
168}
169
// Delegates demanded-bits simplification of target-specific intrinsics to the
// TTI hook; std::nullopt means "not handled here". KnownBitsComputed tells the
// caller whether the hook already filled in `Known`.
// NOTE(review): the return-type/declarator line (doxygen line 170) was dropped
// by the HTML extraction.
171 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
172 bool &KnownBitsComputed) {
173 // Handle target specific intrinsics
174 if (II.getCalledFunction()->isTargetIntrinsic()) {
175 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
176 *this, II, DemandedMask, Known, KnownBitsComputed);
177 }
178 return std::nullopt;
179}
180
// Delegates demanded-vector-elements simplification of target-specific
// intrinsics to the TTI hook. SimplifyAndSetOp lets the hook recurse into
// individual operands; std::nullopt means "not handled here".
// NOTE(review): the return-type/declarator line (doxygen line 181) was dropped
// by the HTML extraction.
182 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
183 APInt &PoisonElts2, APInt &PoisonElts3,
184 std::function<void(Instruction *, unsigned, APInt, APInt &)>
185 SimplifyAndSetOp) {
186 // Handle target specific intrinsics
187 if (II.getCalledFunction()->isTargetIntrinsic()) {
188 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
189 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
190 SimplifyAndSetOp);
191 }
192 return std::nullopt;
193}
194
/// Returns whether an addrspacecast from \p FromAS to \p ToAS is valid on this
/// target, by querying TTI.
195bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
196 // Approved exception for TTI use: This queries a legality property of the
197 // target, not a profitability heuristic. Ideally this should be part of
198 // DataLayout instead.
199 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
200}
201
/// Emits the byte offset computed by \p GEP. When \p RewriteGEP is set, the
/// offset is materialized at the GEP's own insertion point and non-trivial
/// GEPs are rewritten as `getelementptr i8, ptr, offset` to avoid duplicating
/// the offset arithmetic.
202Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
203 if (!RewriteGEP)
204 return llvm::emitGEPOffset(&Builder, DL, GEP);
205
// Emit the offset where the GEP itself lives (only possible when the
// GEPOperator is a real Instruction, not a ConstantExpr).
206 IRBuilderBase::InsertPointGuard Guard(Builder);
207 auto *Inst = dyn_cast<Instruction>(GEP);
208 if (Inst)
209 Builder.SetInsertPoint(Inst);
210
211 Value *Offset = EmitGEPOffset(GEP);
212 // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
213 if (Inst && !GEP->hasAllConstantIndices() &&
214 !GEP->getSourceElementType()->isIntegerTy(8)) {
// NOTE(review): the call head on doxygen line 215 (presumably
// `replaceInstUsesWith(`) and the closing line 218 were dropped by the HTML
// extraction; the surviving argument list below is the i8-GEP replacement.
216 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
217 Offset, "", GEP->getNoWrapFlags()));
219 }
220 return Offset;
221}
222
/// Emits the combined byte offset of a chain of GEPs (given innermost-last in
/// \p GEPs, hence the reverse() below), summing the individual offsets with
/// the wrap flags from \p NW. One-use GEPs are accumulated and folded into the
/// next multi-use GEP; when \p RewriteGEPs is set, multi-use GEPs are rewritten
/// as ptradd of the computed offset. Returns a zero of \p IdxTy if the chain
/// contributes no offset.
223Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
224 GEPNoWrapFlags NW, Type *IdxTy,
225 bool RewriteGEPs) {
// Helper: add Offset into the running Sum (nullptr Sum means "no terms yet").
226 auto Add = [&](Value *Sum, Value *Offset) -> Value * {
227 if (Sum)
228 return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
229 NW.isInBounds());
230 else
231 return Offset;
232 };
233
234 Value *Sum = nullptr;
235 Value *OneUseSum = nullptr;
236 Value *OneUseBase = nullptr;
237 GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
238 for (GEPOperator *GEP : reverse(GEPs)) {
239 Value *Offset;
240 {
241 // Expand the offset at the point of the previous GEP to enable rewriting.
242 // However, use the original insertion point for calculating Sum.
243 IRBuilderBase::InsertPointGuard Guard(Builder);
244 auto *Inst = dyn_cast<Instruction>(GEP);
245 if (RewriteGEPs && Inst)
246 Builder.SetInsertPoint(Inst);
247
// NOTE(review): the assignment to Offset (doxygen line 248, presumably
// `Offset = EmitGEPOffset(GEP);`) was dropped by the HTML extraction.
249 if (Offset->getType() != IdxTy)
250 Offset = Builder.CreateVectorSplat(
251 cast<VectorType>(IdxTy)->getElementCount(), Offset);
252 if (GEP->hasOneUse()) {
253 // Offsets of one-use GEPs will be merged into the next multi-use GEP.
254 OneUseSum = Add(OneUseSum, Offset);
255 OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
256 if (!OneUseBase)
257 OneUseBase = GEP->getPointerOperand();
258 continue;
259 }
260
261 if (OneUseSum)
262 Offset = Add(OneUseSum, Offset);
263
264 // Rewrite the GEP to reuse the computed offset. This also includes
265 // offsets from preceding one-use GEPs.
266 if (RewriteGEPs && Inst &&
267 !(GEP->getSourceElementType()->isIntegerTy(8) &&
268 GEP->getOperand(1) == Offset)) {
// NOTE(review): the call head on doxygen line 269 (presumably
// `replaceInstUsesWith(`) and closing line 274 were dropped by the extraction.
270 *Inst,
271 Builder.CreatePtrAdd(
272 OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
273 OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
275 }
276 }
277
278 Sum = Add(Sum, Offset);
// Reset the one-use accumulator after it is folded into a multi-use GEP.
279 OneUseSum = OneUseBase = nullptr;
280 OneUseFlags = GEPNoWrapFlags::all();
281 }
282 if (OneUseSum)
283 Sum = Add(Sum, OneUseSum);
284 if (!Sum)
285 return Constant::getNullValue(IdxTy);
286 return Sum;
287}
288
289/// Legal integers and common types are considered desirable. This is used to
290/// avoid creating instructions with types that may not be supported well by the
291/// the backend.
292/// NOTE: This treats i8, i16 and i32 specially because they are common
293/// types in frontend languages.
294bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
295 switch (BitWidth) {
296 case 8:
297 case 16:
298 case 32:
299 return true;
300 default:
301 return DL.isLegalInteger(BitWidth);
302 }
303}
304
305/// Return true if it is desirable to convert an integer computation from a
306/// given bit width to a new bit width.
307/// We don't want to convert from a legal or desirable type (like i8) to an
308/// illegal type or from a smaller to a larger illegal type. A width of '1'
309/// is always treated as a desirable type because i1 is a fundamental type in
310/// IR, and there are many specialized optimizations for i1 types.
311/// Common/desirable widths are equally treated as legal to convert to, in
312/// order to open up more combining opportunities.
313bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
314 unsigned ToWidth) const {
315 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
316 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
317
318 // Convert to desirable widths even if they are not legal types.
319 // Only shrink types, to prevent infinite loops.
320 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
321 return true;
322
323 // If this is a legal or desiable integer from type, and the result would be
324 // an illegal type, don't do the transformation.
325 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
326 return false;
327
328 // Otherwise, if both are illegal, do not increase the size of the result. We
329 // do allow things like i160 -> i64, but not i64 -> i160.
330 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
331 return false;
332
333 return true;
334}
335
336/// Return true if it is desirable to convert a computation from 'From' to 'To'.
337/// We don't want to convert from a legal to an illegal type or from a smaller
338/// to a larger illegal type. i1 is always treated as a legal type because it is
339/// a fundamental type in IR, and there are many specialized optimizations for
340/// i1 types.
341bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
342 // TODO: This could be extended to allow vectors. Datalayout changes might be
343 // needed to properly support that.
344 if (!From->isIntegerTy() || !To->isIntegerTy())
345 return false;
346
347 unsigned FromWidth = From->getPrimitiveSizeInBits();
348 unsigned ToWidth = To->getPrimitiveSizeInBits();
349 return shouldChangeType(FromWidth, ToWidth);
350}
351
352// Return true, if No Signed Wrap should be maintained for I.
353// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
354// where both B and C should be ConstantInts, results in a constant that does
355// not overflow. This function only handles the Add/Sub/Mul opcodes. For
356// all other opcodes, the function conservatively returns false.
// NOTE(review): the declarator and the OBO initialization (doxygen lines
// 357-358, presumably `static bool maintainNoSignedWrap(BinaryOperator &I,
// Value *B, Value *C) {` and a dyn_cast to OverflowingBinaryOperator) were
// dropped by the HTML extraction.
359 if (!OBO || !OBO->hasNoSignedWrap())
360 return false;
361
// Both operands must be constant integers (splat constants included).
362 const APInt *BVal, *CVal;
363 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
364 return false;
365
366 // We reason about Add/Sub/Mul Only.
367 bool Overflow = false;
368 switch (I.getOpcode()) {
369 case Instruction::Add:
370 (void)BVal->sadd_ov(*CVal, Overflow);
371 break;
372 case Instruction::Sub:
373 (void)BVal->ssub_ov(*CVal, Overflow);
374 break;
375 case Instruction::Mul:
376 (void)BVal->smul_ov(*CVal, Overflow);
377 break;
378 default:
379 // Conservatively return false for other opcodes.
380 return false;
381 }
// nsw survives only when the folded constant did not signed-overflow.
382 return !Overflow;
383}
384
// Returns true iff the value is an OverflowingBinaryOperator carrying nuw.
// NOTE(review): the declarator lines (doxygen 385-386) were dropped by the
// HTML extraction; OBO is presumably a dyn_cast of the parameter.
387 return OBO && OBO->hasNoUnsignedWrap();
388}
389
// Returns true iff the value is an OverflowingBinaryOperator carrying nsw.
// NOTE(review): the declarator lines (doxygen 390-391) were dropped by the
// HTML extraction; OBO is presumably a dyn_cast of the parameter.
392 return OBO && OBO->hasNoSignedWrap();
393}
394
395/// Conservatively clears subclassOptionalData after a reassociation or
396/// commutation. We preserve fast-math flags when applicable as they can be
397/// preserved.
// NOTE(review): the declarator and the FPMO initialization (doxygen lines
// 398-399, presumably a dyn_cast of &I to FPMathOperator) were dropped by the
// HTML extraction.
400 if (!FPMO) {
401 I.clearSubclassOptionalData();
402 return;
403 }
404
// For FP math, save the fast-math flags across the clear and restore them.
405 FastMathFlags FMF = I.getFastMathFlags();
406 I.clearSubclassOptionalData();
407 I.setFastMathFlags(FMF);
408}
409
410/// Combine constant operands of associative operations either before or after a
411/// cast to eliminate one of the associative operations:
412/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
413/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
// NOTE(review): the first declarator line (doxygen 414, presumably
// `static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,`) was dropped by
// the HTML extraction.
415 InstCombinerImpl &IC) {
416 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
417 if (!Cast || !Cast->hasOneUse())
418 return false;
419
420 // TODO: Enhance logic for other casts and remove this check.
421 auto CastOpcode = Cast->getOpcode();
422 if (CastOpcode != Instruction::ZExt)
423 return false;
424
425 // TODO: Enhance logic for other BinOps and remove this check.
426 if (!BinOp1->isBitwiseLogicOp())
427 return false;
428
// The inner binop must be one-use and use the same bitwise opcode.
429 auto AssocOpcode = BinOp1->getOpcode();
430 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
431 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
432 return false;
433
434 Constant *C1, *C2;
435 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
436 !match(BinOp2->getOperand(1), m_Constant(C2)))
437 return false;
438
439 // TODO: This assumes a zext cast.
440 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
441 // to the destination type might lose bits.
442
443 // Fold the constants together in the destination type:
444 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
445 const DataLayout &DL = IC.getDataLayout();
446 Type *DestTy = C1->getType();
447 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
448 if (!CastC2)
449 return false;
450 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
451 if (!FoldedC)
452 return false;
453
// Rewire: cast now consumes X directly, and the outer op uses the folded
// constant. (Doxygen line 456 was dropped by the HTML extraction.)
454 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
455 IC.replaceOperand(*BinOp1, 1, FoldedC);
// The cast's operand changed, so its poison-generating flags no longer hold.
457 Cast->dropPoisonGeneratingFlags();
458 return true;
459}
460
461// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
462// inttoptr ( ptrtoint (x) ) --> x
463Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
464 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
465 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
466 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
467 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
468 Type *CastTy = IntToPtr->getDestTy();
469 if (PtrToInt &&
470 CastTy->getPointerAddressSpace() ==
471 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
472 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
473 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
474 return PtrToInt->getOperand(0);
475 }
476 return nullptr;
477}
478
479/// This performs a few simplifications for operators that are associative or
480/// commutative:
481///
482/// Commutative operators:
483///
484/// 1. Order operands such that they are listed from right (least complex) to
485/// left (most complex). This puts constants before unary operators before
486/// binary operators.
487///
488/// Associative operators:
489///
490/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
491/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
492///
493/// Associative and commutative operators:
494///
495/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
496/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
497/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
498/// if C1 and C2 are constants.
// NOTE(review): the declarator (doxygen line 499) and the
// flag-clearing calls at doxygen lines 540, 569, 597, 617 and 652 (presumably
// `ClearSubclassDataAfterReassociation(I);`) were dropped by the HTML
// extraction; the surrounding comments reference them.
500 Instruction::BinaryOps Opcode = I.getOpcode();
501 bool Changed = false;
502
503 do {
504 // Order operands such that they are listed from right (least complex) to
505 // left (most complex). This puts constants before unary operators before
506 // binary operators.
507 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
508 getComplexity(I.getOperand(1)))
509 Changed = !I.swapOperands();
510
511 if (I.isCommutative()) {
512 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
513 replaceOperand(I, 0, Pair->first);
514 replaceOperand(I, 1, Pair->second);
515 Changed = true;
516 }
517 }
518
519 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
520 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
521
522 if (I.isAssociative()) {
523 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
524 if (Op0 && Op0->getOpcode() == Opcode) {
525 Value *A = Op0->getOperand(0);
526 Value *B = Op0->getOperand(1);
527 Value *C = I.getOperand(1);
528
529 // Does "B op C" simplify?
530 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
531 // It simplifies to V. Form "A op V".
532 replaceOperand(I, 0, A);
533 replaceOperand(I, 1, V);
534 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
535 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
536
537 // Conservatively clear all optional flags since they may not be
538 // preserved by the reassociation. Reset nsw/nuw based on the above
539 // analysis.
541
542 // Note: this is only valid because SimplifyBinOp doesn't look at
543 // the operands to Op0.
544 if (IsNUW)
545 I.setHasNoUnsignedWrap(true);
546
547 if (IsNSW)
548 I.setHasNoSignedWrap(true);
549
550 Changed = true;
551 ++NumReassoc;
552 continue;
553 }
554 }
555
556 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
557 if (Op1 && Op1->getOpcode() == Opcode) {
558 Value *A = I.getOperand(0);
559 Value *B = Op1->getOperand(0);
560 Value *C = Op1->getOperand(1);
561
562 // Does "A op B" simplify?
563 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
564 // It simplifies to V. Form "V op C".
565 replaceOperand(I, 0, V);
566 replaceOperand(I, 1, C);
567 // Conservatively clear the optional flags, since they may not be
568 // preserved by the reassociation.
570 Changed = true;
571 ++NumReassoc;
572 continue;
573 }
574 }
575 }
576
577 if (I.isAssociative() && I.isCommutative()) {
578 if (simplifyAssocCastAssoc(&I, *this)) {
579 Changed = true;
580 ++NumReassoc;
581 continue;
582 }
583
584 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
585 if (Op0 && Op0->getOpcode() == Opcode) {
586 Value *A = Op0->getOperand(0);
587 Value *B = Op0->getOperand(1);
588 Value *C = I.getOperand(1);
589
590 // Does "C op A" simplify?
591 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
592 // It simplifies to V. Form "V op B".
593 replaceOperand(I, 0, V);
594 replaceOperand(I, 1, B);
595 // Conservatively clear the optional flags, since they may not be
596 // preserved by the reassociation.
598 Changed = true;
599 ++NumReassoc;
600 continue;
601 }
602 }
603
604 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
605 if (Op1 && Op1->getOpcode() == Opcode) {
606 Value *A = I.getOperand(0);
607 Value *B = Op1->getOperand(0);
608 Value *C = Op1->getOperand(1);
609
610 // Does "C op A" simplify?
611 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
612 // It simplifies to V. Form "B op V".
613 replaceOperand(I, 0, B);
614 replaceOperand(I, 1, V);
615 // Conservatively clear the optional flags, since they may not be
616 // preserved by the reassociation.
618 Changed = true;
619 ++NumReassoc;
620 continue;
621 }
622 }
623
624 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
625 // if C1 and C2 are constants.
626 Value *A, *B;
627 Constant *C1, *C2, *CRes;
628 if (Op0 && Op1 &&
629 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
630 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
631 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
632 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
633 bool IsNUW = hasNoUnsignedWrap(I) &&
634 hasNoUnsignedWrap(*Op0) &&
635 hasNoUnsignedWrap(*Op1);
636 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
637 BinaryOperator::CreateNUW(Opcode, A, B) :
638 BinaryOperator::Create(Opcode, A, B);
639
640 if (isa<FPMathOperator>(NewBO)) {
641 FastMathFlags Flags = I.getFastMathFlags() &
642 Op0->getFastMathFlags() &
643 Op1->getFastMathFlags();
644 NewBO->setFastMathFlags(Flags);
645 }
646 InsertNewInstWith(NewBO, I.getIterator());
647 NewBO->takeName(Op1);
648 replaceOperand(I, 0, NewBO);
649 replaceOperand(I, 1, CRes);
650 // Conservatively clear the optional flags, since they may not be
651 // preserved by the reassociation.
653 if (IsNUW)
654 I.setHasNoUnsignedWrap(true);
655
656 Changed = true;
657 continue;
658 }
659 }
660
661 // No further simplifications.
662 return Changed;
663 } while (true);
664}
665
666/// Return whether "X LOp (Y ROp Z)" is always equal to
667/// "(X LOp Y) ROp (X LOp Z)".
// NOTE(review): the declarator lines (doxygen 668-669) were dropped by the
// HTML extraction.
670 // X & (Y | Z) <--> (X & Y) | (X & Z)
671 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
672 if (LOp == Instruction::And)
673 return ROp == Instruction::Or || ROp == Instruction::Xor;
674
675 // X | (Y & Z) <--> (X | Y) & (X | Z)
676 if (LOp == Instruction::Or)
677 return ROp == Instruction::And;
678
679 // X * (Y + Z) <--> (X * Y) + (X * Z)
680 // X * (Y - Z) <--> (X * Y) - (X * Z)
681 if (LOp == Instruction::Mul)
682 return ROp == Instruction::Add || ROp == Instruction::Sub;
683
684 return false;
685}
686
687/// Return whether "(X LOp Y) ROp Z" is always equal to
688/// "(X ROp Z) LOp (Y ROp Z)".
// NOTE(review): the declarator and the commutation guard (doxygen lines
// 689-691) plus the final shift-distribution return (line 695) were dropped
// by the HTML extraction; only the delegating return and comments survive.
692 return leftDistributesOverRight(ROp, LOp);
693
694 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
696
697 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
698 // but this requires knowing that the addition does not overflow and other
699 // such subtleties.
700}
701
702/// This function returns identity value for given opcode, which can be used to
703/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
// NOTE(review): the declarator line (doxygen 704) was dropped by the HTML
// extraction.
705 if (isa<Constant>(V))
706 return nullptr;
707
// Identity is e.g. 1 for mul, 0 for add; nullptr when none exists.
708 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
709}
710
711/// This function predicates factorization using distributive laws. By default,
712/// it just returns the 'Op' inputs. But for special-cases like
713/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
714/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
715/// allow more factorization opportunities.
// NOTE(review): the declarator lines (doxygen 716-717), the assignment
// receiving the folded constant (line 726) and a condition line inside the
// AShr case (line 735) were dropped by the HTML extraction.
718 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
719 assert(Op && "Expected a binary operator");
720 LHS = Op->getOperand(0);
721 RHS = Op->getOperand(1);
722 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
723 Constant *C;
724 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
725 // X << C --> X * (1 << C)
727 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
728 assert(RHS && "Constant folding of immediate constants failed");
729 return Instruction::Mul;
730 }
731 // TODO: We can add other conversions e.g. shr => div etc.
732 }
733 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
734 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
736 // lshr nneg C, X --> ashr nneg C, X
737 return Instruction::AShr;
738 }
739 }
740 return Op->getOpcode();
741}
742
743/// This tries to simplify binary operations by factorizing out common terms
744/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
// NOTE(review): the declarator lines (doxygen 745-746) were dropped by the
// HTML extraction. On success the factored value is returned; nullptr means
// no profitable factorization was found.
747 Instruction::BinaryOps InnerOpcode, Value *A,
748 Value *B, Value *C, Value *D) {
749 assert(A && B && C && D && "All values must be provided");
750
751 Value *V = nullptr;
752 Value *RetVal = nullptr;
753 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
754 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
755
756 // Does "X op' Y" always equal "Y op' X"?
757 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
758
759 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
760 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
761 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
762 // commutative case, "(A op' B) op (C op' A)"?
763 if (A == C || (InnerCommutative && A == D)) {
764 if (A != C)
765 std::swap(C, D);
766 // Consider forming "A op' (B op D)".
767 // If "B op D" simplifies then it can be formed with no cost.
768 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
769
770 // If "B op D" doesn't simplify then only go on if one of the existing
771 // operations "A op' B" and "C op' D" will be zapped as no longer used.
772 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
773 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
774 if (V)
775 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
776 }
777 }
778
779 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
780 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
781 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
782 // commutative case, "(A op' B) op (B op' D)"?
783 if (B == D || (InnerCommutative && B == C)) {
784 if (B != D)
785 std::swap(C, D);
786 // Consider forming "(A op C) op' B".
787 // If "A op C" simplifies then it can be formed with no cost.
788 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
789
790 // If "A op C" doesn't simplify then only go on if one of the existing
791 // operations "A op' B" and "C op' D" will be zapped as no longer used.
792 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
793 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
794 if (V)
795 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
796 }
797 }
798
799 if (!RetVal)
800 return nullptr;
801
802 ++NumFactor;
803 RetVal->takeName(&I);
804
805 // Try to add no-overflow flags to the final value.
806 if (isa<BinaryOperator>(RetVal)) {
807 bool HasNSW = false;
808 bool HasNUW = false;
// NOTE(review): the condition opening this flag-seeding branch (doxygen line
// 809, presumably testing isa<OverflowingBinaryOperator>(&I)) was dropped by
// the HTML extraction.
810 HasNSW = I.hasNoSignedWrap();
811 HasNUW = I.hasNoUnsignedWrap();
812 }
813 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
814 HasNSW &= LOBO->hasNoSignedWrap();
815 HasNUW &= LOBO->hasNoUnsignedWrap();
816 }
817
818 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
819 HasNSW &= ROBO->hasNoSignedWrap();
820 HasNUW &= ROBO->hasNoUnsignedWrap();
821 }
822
823 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
824 // We can propagate 'nsw' if we know that
825 // %Y = mul nsw i16 %X, C
826 // %Z = add nsw i16 %Y, %X
827 // =>
828 // %Z = mul nsw i16 %X, C+1
829 //
830 // iff C+1 isn't INT_MIN
831 const APInt *CInt;
832 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
833 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
834
835 // nuw can be propagated with any constant or nuw value.
836 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
837 }
838 }
839 return RetVal;
840}
841
842// If `I` has one Const operand and the other matches `(ctpop (not x))`,
843// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
844// This is only useful is the new subtract can fold so we only handle the
845// following cases:
846// 1) (add/sub/disjoint_or C, (ctpop (not x))
847// -> (add/sub/disjoint_or C', (ctpop x))
848// 1) (cmp pred C, (ctpop (not x))
849// -> (cmp pred C', (ctpop x))
// NOTE(review): the declarator line (doxygen 850) was dropped by the HTML
// extraction — confirm the function name against upstream.
851 unsigned Opc = I->getOpcode();
852 unsigned ConstIdx = 1;
853 switch (Opc) {
854 default:
855 return nullptr;
856 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
857 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
858 // is constant.
859 case Instruction::Sub:
860 ConstIdx = 0;
861 break;
862 case Instruction::ICmp:
863 // Signed predicates aren't correct in some edge cases like for i2 types, as
864 // well since (ctpop x) is known [0, log2(BitWidth(x))] almost all signed
865 // comparisons against it are simplfied to unsigned.
866 if (cast<ICmpInst>(I)->isSigned())
867 return nullptr;
868 break;
869 case Instruction::Or:
870 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
871 return nullptr;
872 [[fallthrough]];
873 case Instruction::Add:
874 break;
875 }
876
877 Value *Op;
878 // Find ctpop.
// NOTE(review): the matcher line (doxygen 880, presumably the
// m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Not(m_Value(Op)))) pattern) was
// dropped by the HTML extraction.
879 if (!match(I->getOperand(1 - ConstIdx),
881 return nullptr;
882
883 Constant *C;
884 // Check other operand is ImmConstant.
885 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
886 return nullptr;
887
888 Type *Ty = Op->getType();
889 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
890 // Need extra check for icmp. Note if this check is true, it generally means
891 // the icmp will simplify to true/false.
892 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
// NOTE(review): the constant-fold expression initializing Cmp (doxygen line
// 894) was dropped by the HTML extraction.
893 Constant *Cmp =
895 if (!Cmp || !Cmp->isNullValue())
896 return nullptr;
897 }
898
899 // Check we can invert `(not x)` for free.
900 bool Consumes = false;
901 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
902 return nullptr;
903 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
904 assert(NotOp != nullptr &&
905 "Desync between isFreeToInvert and getFreelyInverted");
906
907 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
908
909 Value *R = nullptr;
910
911 // Do the transformation here to avoid potentially introducing an infinite
912 // loop.
913 switch (Opc) {
914 case Instruction::Sub:
915 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
916 break;
917 case Instruction::Or:
918 case Instruction::Add:
919 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
920 break;
921 case Instruction::ICmp:
922 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
923 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
924 break;
925 default:
926 llvm_unreachable("Unhandled Opcode");
927 }
928 assert(R != nullptr);
929 return replaceInstUsesWith(*I, R);
930}
931
932 // (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
933 // IFF
934 // 1) the logic_shifts match
935 // 2) either both binops are binops and one is `and` or
936 // BinOp1 is `and`
937 // (logic_shift (inv_logic_shift C1, C), C) == C1 or
938 //
939 // -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
940 //
941 // (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
942 // IFF
943 // 1) the logic_shifts match
944 // 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
945 //
946 // -> (BinOp (logic_shift (BinOp X, Y)), Mask)
947 //
948 // (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
949 // IFF
950 // 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
951 // 2) Binop2 is `not`
952 //
953 // -> (arithmetic_shift Binop1((not X), Y), Amt)
954
// NOTE(review): the function header (original line 955) was lost in this
// extraction; presumably
// `Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {`
// -- confirm against the upstream file before relying on this listing.
956 const DataLayout &DL = I.getDataLayout();
// Only bitwise logic ops plus `add` participate; `sub` with a constant mask
// canonicalizes to `add` earlier, so it is deliberately excluded.
957 auto IsValidBinOpc = [](unsigned Opc) {
958 switch (Opc) {
959 default:
960 return false;
961 case Instruction::And:
962 case Instruction::Or:
963 case Instruction::Xor:
964 case Instruction::Add:
965 // Skip Sub as we only match constant masks which will canonicalize to use
966 // add.
967 return true;
968 }
969 };
970
971 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
972 // constraints.
973 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
974 unsigned ShOpc) {
975 assert(ShOpc != Instruction::AShr);
976 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
977 ShOpc == Instruction::Shl;
978 };
979
// Maps lshr <-> shl. AShr has no logic-shift inverse and is rejected by the
// assert (callers handle ashr via the dedicated `not` special case below).
980 auto GetInvShift = [](unsigned ShOpc) {
981 assert(ShOpc != Instruction::AShr);
982 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
983 };
984
// Decides whether the mask constant survives a round-trip through the
// inverse shift, which is what makes pulling it inside the shift legal.
985 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
986 unsigned ShOpc, Constant *CMask,
987 Constant *CShift) {
988 // If the BinOp1 is `and` we don't need to check the mask.
989 if (BinOpc1 == Instruction::And)
990 return true;
991
992 // For all other possible transfers we need complete distributable
993 // binop/shift (anything but `add` + `lshr`).
994 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
995 return false;
996
997 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
998 // vecs, otherwise the mask will be simplified and the following check will
999 // handle it).
1000 if (BinOpc2 == Instruction::And)
1001 return true;
1002
1003 // Otherwise, need mask that meets the below requirement.
1004 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
1005 Constant *MaskInvShift =
1006 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1007 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
1008 CMask;
1009 };
1010
// Attempts the fold with operand `ShOpnum` of I being the bare shift and the
// other operand being the (binop (shift ...), mask) expression.
1011 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
1012 Constant *CMask, *CShift;
1013 Value *X, *Y, *ShiftedX, *Mask, *Shift;
1014 if (!match(I.getOperand(ShOpnum),
1015 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
1016 return nullptr;
// NOTE(review): original line 1018 (the middle of this matcher expression,
// likely an `m_OneUse(m_BinOp(m_CombineAnd(` wrapper) was dropped in
// extraction -- confirm against upstream.
1017 if (!match(I.getOperand(1 - ShOpnum),
1019 m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
1020 m_Value(ShiftedX)),
1021 m_Value(Mask))))
1022 return nullptr;
1023 // Make sure we are matching instruction shifts and not ConstantExpr
1024 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
1025 auto *IX = dyn_cast<Instruction>(ShiftedX);
1026 if (!IY || !IX)
1027 return nullptr;
1028
1029 // LHS and RHS need same shift opcode
1030 unsigned ShOpc = IY->getOpcode();
1031 if (ShOpc != IX->getOpcode())
1032 return nullptr;
1033
1034 // Make sure binop is real instruction and not ConstantExpr
1035 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
1036 if (!BO2)
1037 return nullptr;
1038
1039 unsigned BinOpc = BO2->getOpcode();
1040 // Make sure we have valid binops.
1041 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
1042 return nullptr;
1043
// ashr only folds via the special case: a bitwise logic op whose inner binop
// is xor-by-allones (i.e. `not`), per the third pattern in the header comment.
1044 if (ShOpc == Instruction::AShr) {
1045 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
1046 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
1047 Value *NotX = Builder.CreateNot(X);
1048 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
// NOTE(review): original line 1049 (likely `return BinaryOperator::Create(`)
// was dropped in extraction; the next line is its argument list.
1050 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
1051 }
1052
1053 return nullptr;
1054 }
1055
1056 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
1057 // distribute to drop the shift irrelevant of constants.
1058 if (BinOpc == I.getOpcode() &&
1059 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
1060 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
1061 Value *NewBinOp1 = Builder.CreateBinOp(
1062 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
1063 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
1064 }
1065
1066 // Otherwise we can only distribute by constant shifting the mask, so
1067 // ensure we have constants.
1068 if (!match(Shift, m_ImmConstant(CShift)))
1069 return nullptr;
1070 if (!match(Mask, m_ImmConstant(CMask)))
1071 return nullptr;
1072
1073 // Check if we can distribute the binops.
1074 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1075 return nullptr;
1076
// Pull the mask inside the shift by applying the inverse shift to it, then
// rebuild as (shift (binop1 Y, (binop2 X, newmask)), shamt).
1077 Constant *NewCMask =
1078 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1079 Value *NewBinOp2 = Builder.CreateBinOp(
1080 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1081 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1082 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1083 NewBinOp1, CShift);
1084 };
1085
// Try both operand orders of the (commutative-with-respect-to-matching) binop.
1086 if (Instruction *R = MatchBinOp(0))
1087 return R;
1088 return MatchBinOp(1);
1089}
1090
1091 // (Binop (zext C), (select C, T, F))
1092 // -> (select C, (binop 1, T), (binop 0, F))
1093 //
1094 // (Binop (sext C), (select C, T, F))
1095 // -> (select C, (binop -1, T), (binop 0, F))
1096 //
1097 // Attempt to simplify binary operations into a select with folded args, when
1098 // one operand of the binop is a select instruction and the other operand is a
1099 // zext/sext extension, whose value is the select condition.
// NOTE(review): the function header (original lines 1100-1101) was lost in
// extraction; presumably
// `Instruction *InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(
//      BinaryOperator &I) {` -- confirm against upstream.
1102 // TODO: this simplification may be extended to any speculatable instruction,
1103 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1104 Instruction::BinaryOps Opc = I.getOpcode();
1105 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1106 Value *A, *CondVal, *TrueVal, *FalseVal;
1107 Value *CastOp;
1108 Constant *CastTrueVal, *CastFalseVal;
1109
// Matches one operand as a select-like value with constant arms (the ext of
// the condition) and the other as a full select, binding A / CondVal / arms.
1110 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1111 return match(CastOp, m_SelectLike(m_Value(A), m_Constant(CastTrueVal),
1112 m_Constant(CastFalseVal))) &&
1113 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1114 m_Value(FalseVal)));
1115 };
1116
1117 // Make sure one side of the binop is a select instruction, and the other is a
1118 // zero/sign extension operating on a i1.
1119 if (MatchSelectAndCast(LHS, RHS))
1120 CastOp = LHS;
1121 else if (MatchSelectAndCast(RHS, LHS))
1122 CastOp = RHS;
1123 else
1124 return nullptr;
1125
// NOTE(review): original line 1126 (the start of the `SI` initializer, which
// the two lines below complete; likely gated on a profile-metadata flag) was
// dropped in extraction -- confirm against upstream.
1127 ? nullptr
1128 : cast<SelectInst>(CastOp == LHS ? RHS : LHS);
1129
// Folds the appropriate constant arm of the cast into the binop while
// preserving operand order (matters for non-commutative opcodes).
1130 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1131 bool IsCastOpRHS = (CastOp == RHS);
1132 Value *CastVal = IsTrueArm ? CastFalseVal : CastTrueVal;
1133
1134 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, CastVal)
1135 : Builder.CreateBinOp(Opc, CastVal, V);
1136 };
1137
1138 // If the value used in the zext/sext is the select condition, or the negated
1139 // of the select condition, the binop can be simplified.
1140 if (CondVal == A) {
1141 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1142 return SelectInst::Create(CondVal, NewTrueVal,
1143 NewFoldedConst(true, FalseVal), "", nullptr, SI);
1144 }
1145 if (match(A, m_Not(m_Specific(CondVal)))) {
// Negated condition: the cast's arms swap relative to the select's arms.
1146 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1147 return SelectInst::Create(CondVal, NewTrueVal,
1148 NewFoldedConst(false, FalseVal), "", nullptr, SI);
1149 }
1150
1151 return nullptr;
1152}
1153
// Tries to factor a common term out of "(A op' B) op (C op' D)" (and the
// degenerate forms where one side is not itself a binop, using the identity
// value of op' as the missing operand).
// NOTE(review): the function header (original line 1154) was lost in
// extraction; presumably
// `Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {`.
1155 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
// NOTE(review): original lines 1156-1157 (likely the `Op0`/`Op1`
// dyn_cast<BinaryOperator> definitions used throughout below) were dropped in
// extraction -- confirm against upstream.
1158 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1159 Value *A, *B, *C, *D;
1160 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1161
// Decompose each side as "A op' B" / "C op' D" where possible.
1162 if (Op0)
1163 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1164 if (Op1)
1165 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1166
1167 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1168 // a common term.
1169 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1170 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1171 return V;
1172
1173 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1174 // term.
1175 if (Op0)
1176 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1177 if (Value *V =
1178 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1179 return V;
1180
1181 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1182 // term.
1183 if (Op1)
1184 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1185 if (Value *V =
1186 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1187 return V;
1188
1189 return nullptr;
1190}
1191
1192 /// This tries to simplify binary operations which some other binary operation
1193 /// distributes over either by factorizing out common terms
1194 /// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1195 /// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1196 /// Returns the simplified value, or null if it didn't simplify.
// NOTE(review): the function header (original line 1197) was lost in
// extraction; presumably
// `Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {`.
1198 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
// NOTE(review): original lines 1199-1200 (likely the `Op0`/`Op1`
// dyn_cast<BinaryOperator> definitions used below) were dropped in extraction.
1201 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1202
1203 // Factorization.
1204 if (Value *R = tryFactorizationFolds(I))
1205 return R;
1206
1207 // Expansion.
// Expansion is only profitable when at least one of the distributed halves
// simplifies away; otherwise we would grow the IR.
1208 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1209 // The instruction has the form "(A op' B) op C". See if expanding it out
1210 // to "(A op C) op' (B op C)" results in simplifications.
1211 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1212 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1213
1214 // Disable the use of undef because it's not safe to distribute undef.
1215 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1216 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1217 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1218
1219 // Do "A op C" and "B op C" both simplify?
1220 if (L && R) {
1221 // They do! Return "L op' R".
1222 ++NumExpand;
1223 C = Builder.CreateBinOp(InnerOpcode, L, R);
1224 C->takeName(&I);
1225 return C;
1226 }
1227
1228 // Does "A op C" simplify to the identity value for the inner opcode?
1229 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1230 // They do! Return "B op C".
1231 ++NumExpand;
1232 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1233 C->takeName(&I);
1234 return C;
1235 }
1236
1237 // Does "B op C" simplify to the identity value for the inner opcode?
1238 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1239 // They do! Return "A op C".
1240 ++NumExpand;
1241 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1242 C->takeName(&I);
1243 return C;
1244 }
1245 }
1246
// Mirror image of the block above for "A op (B op' C)".
1247 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1248 // The instruction has the form "A op (B op' C)". See if expanding it out
1249 // to "(A op B) op' (A op C)" results in simplifications.
1250 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1251 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1252
1253 // Disable the use of undef because it's not safe to distribute undef.
1254 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1255 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1256 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1257
1258 // Do "A op B" and "A op C" both simplify?
1259 if (L && R) {
1260 // They do! Return "L op' R".
1261 ++NumExpand;
1262 A = Builder.CreateBinOp(InnerOpcode, L, R);
1263 A->takeName(&I);
1264 return A;
1265 }
1266
1267 // Does "A op B" simplify to the identity value for the inner opcode?
1268 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1269 // They do! Return "A op C".
1270 ++NumExpand;
1271 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1272 A->takeName(&I);
1273 return A;
1274 }
1275
1276 // Does "A op C" simplify to the identity value for the inner opcode?
1277 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1278 // They do! Return "A op B".
1279 ++NumExpand;
1280 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1281 A->takeName(&I);
1282 return A;
1283 }
1284 }
1285
// Fall back to the select-feeding-binop simplification.
1286 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1287}
1288
// Matches two phis in the same block that take the same pair of incoming
// values (possibly swapped per edge); returns that pair on success.
1289 static std::optional<std::pair<Value *, Value *>>
// NOTE(review): original line 1290 (the parameter list and opening brace,
// likely `matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {`) was
// dropped in extraction -- confirm against upstream.
1291 if (LHS->getParent() != RHS->getParent())
1292 return std::nullopt;
1293
1294 if (LHS->getNumIncomingValues() < 2)
1295 return std::nullopt;
1296
// Incoming blocks must line up index-for-index for the per-edge comparison
// below to be meaningful.
1297 if (!equal(LHS->blocks(), RHS->blocks()))
1298 return std::nullopt;
1299
1300 Value *L0 = LHS->getIncomingValue(0);
1301 Value *R0 = RHS->getIncomingValue(0);
1302
1303 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1304 Value *L1 = LHS->getIncomingValue(I);
1305 Value *R1 = RHS->getIncomingValue(I);
1306
// Every edge must carry the same (L0, R0) pair, in either order.
1307 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1308 continue;
1309
1310 return std::nullopt;
1311 }
1312
1313 return std::optional(std::pair(L0, R0));
1314}
1315
// Matches (LHS, RHS) as a "symmetric pair": two instructions of the same
// opcode that select/combine the same two underlying values in opposite ways
// (swapped select arms, min/max of the same operands, or symmetric phis).
1316 std::optional<std::pair<Value *, Value *>>
1317 InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
// NOTE(review): original lines 1318-1319 (likely the `LHSInst`/`RHSInst`
// dyn_cast<Instruction> definitions) were dropped in extraction.
1320 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1321 return std::nullopt;
1322 switch (LHSInst->getOpcode()) {
1323 case Instruction::PHI:
// NOTE(review): original line 1324 (the PHI case body, presumably delegating
// to matchSymmetricPhiNodesPair above) was dropped in extraction.
1325 case Instruction::Select: {
1326 Value *Cond = LHSInst->getOperand(0);
1327 Value *TrueVal = LHSInst->getOperand(1);
1328 Value *FalseVal = LHSInst->getOperand(2);
// Same condition, arms swapped between the two selects.
1329 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1330 FalseVal == RHSInst->getOperand(1))
1331 return std::pair(TrueVal, FalseVal);
1332 return std::nullopt;
1333 }
1334 case Instruction::Call: {
1335 // Match min(a, b) and max(a, b)
1336 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1337 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1338 if (LHSMinMax && RHSMinMax &&
1339 LHSMinMax->getPredicate() ==
// NOTE(review): original line 1340 (the right-hand side of this predicate
// comparison, presumably the swapped/inverse predicate of RHSMinMax) was
// dropped in extraction -- confirm against upstream.
1341 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1342 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1343 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1344 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1345 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1346 return std::nullopt;
1347 }
1348 default:
1349 return std::nullopt;
1350 }
1351}
1352
// Folds a binop whose operand(s) are selects into a single select whose arms
// are the (simplified) per-arm binops.
// NOTE(review): the function header (original line 1353) was lost in
// extraction; presumably
// `Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,`
// with the two parameter lines below completing it.
1354 Value *LHS,
1355 Value *RHS) {
1356 Value *A, *B, *C, *D, *E, *F;
1357 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1358 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1359 if (!LHSIsSelect && !RHSIsSelect)
1360 return nullptr;
1361
// NOTE(review): original line 1362 (the start of the `SI` initializer that
// the two lines below complete) was dropped in extraction.
1363 ? nullptr
1364 : cast<SelectInst>(LHSIsSelect ? LHS : RHS);
1365
// Propagate the binop's FP fast-math flags into the values we build.
1366 FastMathFlags FMF;
// NOTE(review): original line 1367 (likely an FMF guard for the builder) was
// dropped in extraction.
1368 if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
1369 FMF = FPOp->getFastMathFlags();
1370 Builder.setFastMathFlags(FMF);
1371 }
1372
1373 Instruction::BinaryOps Opcode = I.getOpcode();
1374 SimplifyQuery Q = SQ.getWithInstruction(&I);
1375
1376 Value *Cond, *True = nullptr, *False = nullptr;
1377
1378 // Special-case for add/negate combination. Replace the zero in the negation
1379 // with the trailing add operand:
1380 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1381 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1382 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1383 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1384 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1385 return nullptr;
1386 Value *N;
1387 if (True && match(FVal, m_Neg(m_Value(N)))) {
1388 Value *Sub = Builder.CreateSub(Z, N);
1389 return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
1390 }
1391 if (False && match(TVal, m_Neg(m_Value(N)))) {
1392 Value *Sub = Builder.CreateSub(Z, N);
1393 return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
1394 }
1395 return nullptr;
1396 };
1397
1398 if (LHSIsSelect && RHSIsSelect && A == D) {
1399 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1400 Cond = A;
1401 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1402 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1403
// With both selects single-use it is profitable even if only one arm
// simplified: materialize the other arm explicitly.
1404 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1405 if (False && !True)
1406 True = Builder.CreateBinOp(Opcode, B, E);
1407 else if (True && !False)
1408 False = Builder.CreateBinOp(Opcode, C, F);
1409 }
1410 } else if (LHSIsSelect && LHS->hasOneUse()) {
1411 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1412 Cond = A;
1413 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1414 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1415 if (Value *NewSel = foldAddNegate(B, C, RHS))
1416 return NewSel;
1417 } else if (RHSIsSelect && RHS->hasOneUse()) {
1418 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1419 Cond = D;
1420 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1421 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1422 if (Value *NewSel = foldAddNegate(E, F, LHS))
1423 return NewSel;
1424 }
1425
// Only profitable when both arms simplified (the special add/negate cases
// returned earlier).
1426 if (!True || !False)
1427 return nullptr;
1428
1429 Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
1430 NewSI->takeName(&I);
1431 return NewSI;
1432}
1433
1434 /// Freely adapt every user of V as-if V was changed to !V.
1435 /// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
// NOTE(review): the function header (original line 1436) was lost in
// extraction; presumably
// `void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {`.
1437 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1438 for (User *U : make_early_inc_range(I->users())) {
1439 if (U == IgnoredUser)
1440 continue; // Don't consider this user.
1441 switch (cast<Instruction>(U)->getOpcode()) {
1442 case Instruction::Select: {
// Inverting the condition is absorbed by swapping the select's arms.
1443 auto *SI = cast<SelectInst>(U);
1444 SI->swapValues();
1445 SI->swapProfMetadata();
1446 break;
1447 }
1448 case Instruction::Br: {
// NOTE(review): original line 1449 (likely `auto *BI = cast<BranchInst>(U);`)
// was dropped in extraction.
1450 BI->swapSuccessors(); // swaps prof metadata too
// Keep branch-probability info consistent with the swapped successors.
1451 if (BPI)
1452 BPI->swapSuccEdgesProbabilities(BI->getParent());
1453 break;
1454 }
1455 case Instruction::Xor:
// NOTE(review): original lines 1456 and 1458 (the xor-user replacement,
// presumably replacing the `xor X, true` with I itself, plus a worklist
// insertion) were dropped in extraction -- confirm against upstream.
1457 // Add to worklist for DCE.
1459 break;
1460 default:
1461 llvm_unreachable("Got unexpected user - out of sync with "
1462 "canFreelyInvertAllUsersOf() ?");
1463 }
1464 }
1465
1466 // Update pre-existing debug value uses.
1467 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1468 llvm::findDbgValues(I, DbgVariableRecords);
1469
// Append DW_OP_not to each debug expression that refers to I, so the debug
// info keeps describing the original (un-inverted) value.
1470 for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
1471 SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
1472 for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1473 Idx != End; ++Idx)
1474 if (DbgVal->getVariableLocationOp(Idx) == I)
1475 DbgVal->setExpression(
1476 DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
1477 }
1478}
1479
1480 /// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1481 /// constant zero (which is the 'negate' form).
1482 Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1483 Value *NegV;
1484 if (match(V, m_Neg(m_Value(NegV))))
1485 return NegV;
1486
1487 // Constants can be considered to be negated values if they can be folded.
// NOTE(review): original line 1488 (likely a
// `dyn_cast<ConstantInt>`-style guard for the return below) was dropped in
// extraction.
1489 return ConstantExpr::getNeg(C);
1490
// NOTE(review): original line 1491 (likely a ConstantDataVector guard for the
// return below) was dropped in extraction.
1492 if (C->getType()->getElementType()->isIntegerTy())
1493 return ConstantExpr::getNeg(C);
1494
// NOTE(review): original line 1495 (likely a ConstantVector guard opening the
// loop below) was dropped in extraction -- confirm against upstream.
1496 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1497 Constant *Elt = CV->getAggregateElement(i);
1498 if (!Elt)
1499 return nullptr;
1500
// Undef elements are fine to negate; anything non-integer bails out.
1501 if (isa<UndefValue>(Elt))
1502 continue;
1503
1504 if (!isa<ConstantInt>(Elt))
1505 return nullptr;
1506 }
1507 return ConstantExpr::getNeg(CV);
1508 }
1509
1510 // Negate integer vector splats.
1511 if (auto *CV = dyn_cast<Constant>(V))
1512 if (CV->getType()->isVectorTy() &&
1513 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1514 return ConstantExpr::getNeg(CV);
1515
1516 return nullptr;
1517}
1518
1519 // Try to fold:
1520 // 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1521 // -> ({s|u}itofp (int_binop x, y))
1522 // 2) (fp_binop ({s|u}itofp x), FpC)
1523 // -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1524 //
1525 // Assuming the sign of the cast for x/y is `OpsFromSigned`.
1526 Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1527 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
// NOTE(review): original line 1528 (the remaining parameters, likely
// `Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {`)
// was dropped in extraction.
1529
1530 Type *FPTy = BO.getType();
1531 Type *IntTy = IntOps[0]->getType();
1532
1533 unsigned IntSz = IntTy->getScalarSizeInBits();
1534 // This is the maximum number of inuse bits by the integer where the int -> fp
1535 // casts are exact.
1536 unsigned MaxRepresentableBits =
// NOTE(review): original line 1537 (the initializer, presumably the FP type's
// semantics precision) was dropped in extraction.
1538
1539 // Preserve known number of leading bits. This can allow us to trivial nsw/nuw
1540 // checks later on.
1541 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1542
1543 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1544 // cache if between calls to `foldFBinOpOfIntCastsFromSign`.
1545 auto IsNonZero = [&](unsigned OpNo) -> bool {
// Prefer the cached knownbits when present; fall back to a fresh query.
1546 if (OpsKnown[OpNo].hasKnownBits() &&
1547 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1548 return true;
1549 return isKnownNonZero(IntOps[OpNo], SQ);
1550 };
1551
1552 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1553 // NB: This matches the impl in ValueTracking, we just try to use cached
1554 // knownbits here. If we ever start supporting WithCache for
1555 // `isKnownNonNegative`, change this to an explicit call.
1556 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1557 };
1558
1559 // Check if we know for certain that ({s|u}itofp op) is exact.
1560 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1561 // Can we treat this operand as the desired sign?
1562 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1563 !IsNonNeg(OpNo))
1564 return false;
1565
1566 // If fp precision >= bitwidth(op) then its exact.
1567 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1568 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1569 // handled specially. We can't, however, increase the bound arbitrarily for
1570 // `sitofp` as for larger sizes, it won't sign extend.
1571 if (MaxRepresentableBits < IntSz) {
1572 // Otherwise if its signed cast check that fp precisions >= bitwidth(op) -
1573 // numSignBits(op).
1574 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1575 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1576 if (OpsFromSigned)
1577 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1578 // Finally for unsigned check that fp precision >= bitwidth(op) -
1579 // numLeadingZeros(op).
1580 else {
1581 NumUsedLeadingBits[OpNo] =
1582 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1583 }
1584 }
1585 // NB: We could also check if op is known to be a power of 2 or zero (which
1586 // will always be representable). Its unlikely, however, that is we are
1587 // unable to bound op in any way we will be able to pass the overflow checks
1588 // later on.
1589
1590 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1591 return false;
1592 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1593 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1594 IsNonZero(OpNo);
1595 };
1596
1597 // If we have a constant rhs, see if we can losslessly convert it to an int.
1598 if (Op1FpC != nullptr) {
1599 // Signed + Mul req non-zero
1600 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1601 !match(Op1FpC, m_NonZeroFP()))
1602 return nullptr;
// NOTE(review): original lines 1603-1604 (likely
// `Constant *Op1IntC = ConstantFoldCastOperand(`) were dropped in extraction;
// the next line is the argument list of that call.
1605 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1606 IntTy, DL);
1607 if (Op1IntC == nullptr)
1608 return nullptr;
// Require the int->fp round-trip to reproduce the original constant exactly.
1609 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1610 : Instruction::UIToFP,
1611 Op1IntC, FPTy, DL) != Op1FpC)
1612 return nullptr;
1613
1614 // First try to keep sign of cast the same.
1615 IntOps[1] = Op1IntC;
1616 }
1617
1618 // Ensure lhs/rhs integer types match.
1619 if (IntTy != IntOps[1]->getType())
1620 return nullptr;
1621
1622 if (Op1FpC == nullptr) {
1623 if (!IsValidPromotion(1))
1624 return nullptr;
1625 }
1626 if (!IsValidPromotion(0))
1627 return nullptr;
1628
1629 // Final we check if the integer version of the binop will not overflow.
// NOTE(review): original line 1630 (likely `Instruction::BinaryOps IntOpc;`)
// was dropped in extraction.
1631 // Because of the precision check, we can often rule out overflows.
1632 bool NeedsOverflowCheck = true;
1633 // Try to conservatively rule out overflow based on the already done precision
1634 // checks.
1635 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1636 unsigned OverflowMaxCurBits =
1637 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1638 bool OutputSigned = OpsFromSigned;
1639 switch (BO.getOpcode()) {
1640 case Instruction::FAdd:
1641 IntOpc = Instruction::Add;
1642 OverflowMaxOutputBits += OverflowMaxCurBits;
1643 break;
1644 case Instruction::FSub:
1645 IntOpc = Instruction::Sub;
1646 OverflowMaxOutputBits += OverflowMaxCurBits;
1647 break;
1648 case Instruction::FMul:
1649 IntOpc = Instruction::Mul;
1650 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1651 break;
1652 default:
1653 llvm_unreachable("Unsupported binop");
1654 }
1655 // The precision check may have already ruled out overflow.
1656 if (OverflowMaxOutputBits < IntSz) {
1657 NeedsOverflowCheck = false;
1658 // We can bound unsigned overflow from sub to in range signed value (this is
1659 // what allows us to avoid the overflow check for sub).
1660 if (IntOpc == Instruction::Sub)
1661 OutputSigned = true;
1662 }
1663
1664 // Precision check did not rule out overflow, so need to check.
1665 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1666 // `IntOps[...]` arguments to `KnownOps[...]`.
1667 if (NeedsOverflowCheck &&
1668 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1669 return nullptr;
1670
1671 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
// Tag the new integer op with the wrap flag the overflow analysis proved.
1672 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1673 IntBO->setHasNoSignedWrap(OutputSigned);
1674 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1675 }
1676 if (OutputSigned)
1677 return new SIToFPInst(IntBinOp, FPTy);
1678 return new UIToFPInst(IntBinOp, FPTy);
1679}
1680
1681 // Try to fold:
1682 // 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1683 // -> ({s|u}itofp (int_binop x, y))
1684 // 2) (fp_binop ({s|u}itofp x), FpC)
1685 // -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1686 Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1687 // Don't perform the fold on vectors, as the integer operation may be much
1688 // more expensive than the float operation in that case.
1689 if (BO.getType()->isVectorTy())
1690 return nullptr;
1691
1692 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1693 Constant *Op1FpC = nullptr;
1694 // Check for:
1695 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1696 // 2) (binop ({s|u}itofp x), FpC)
// Operand 0 must be an int->fp cast of either signedness.
1697 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1698 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1699 return nullptr;
1700
// Operand 1 may be a constant or another int->fp cast.
1701 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1702 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1703 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1704 return nullptr;
1705
1706 // Cache KnownBits a bit to potentially save some analysis.
1707 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1708
1709 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1710 // different constraints depending on the sign of the cast.
1711 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1712 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1713 IntOps, Op1FpC, OpsKnown))
1714 return R;
1715 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1716 Op1FpC, OpsKnown);
1717}
1718
1719 /// A binop with a constant operand and a sign-extended boolean operand may be
1720 /// converted into a select of constants by applying the binary operation to
1721 /// the constant with the two possible values of the extended boolean (0 or -1).
1722 Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1723 // TODO: Handle non-commutative binop (constant is operand 0).
1724 // TODO: Handle zext.
1725 // TODO: Peek through 'not' of cast.
1726 Value *BO0 = BO.getOperand(0);
1727 Value *BO1 = BO.getOperand(1);
1728 Value *X;
1729 Constant *C;
// Require (binop (sext i1 X), ImmC); any other shape is left alone.
1730 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1731 !X->getType()->isIntOrIntVectorTy(1))
1732 return nullptr;
1733
1734 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
// NOTE(review): original lines 1735-1736 (likely the `Ones`/`Zero` constant
// definitions used below) were dropped in extraction -- confirm against
// upstream.
1737 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1738 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1739 return createSelectInstWithUnknownProfile(X, TVal, FVal);
1740}
1741
// Simplifies I with its select operand replaced by the chosen arm, also
// substituting values known from the select condition into I's operands.
// NOTE(review): original line 1742 (the signature start, likely
// `static Value *simplifyOperationIntoSelectOperand(Instruction &I,
//      SelectInst *SI,`) and line 1744 (likely `SmallVector<Value *> Ops;`)
// were dropped in extraction -- confirm against upstream.
1743 bool IsTrueArm) {
1745 for (Value *Op : I.operands()) {
1746 Value *V = nullptr;
1747 if (Op == SI) {
// The select itself becomes the arm being evaluated.
1748 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
// NOTE(review): original lines 1750-1751 and 1753 (the middle of this icmp
// condition match and its arm-selection guard) were dropped in extraction.
1749 } else if (match(SI->getCondition(),
1752 m_Specific(Op), m_Value(V))) &&
1754 // Pass
1755 } else if (match(Op, m_ZExt(m_Specific(SI->getCondition())))) {
// zext of the condition is 1 on the true arm and 0 on the false arm.
1756 V = IsTrueArm ? ConstantInt::get(Op->getType(), 1)
1757 : ConstantInt::getNullValue(Op->getType());
1758 } else {
1759 V = Op;
1760 }
1761 Ops.push_back(V);
1762 }
1763
1764 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1765}
1766
// Materializes I with its select operand replaced by NewOp: clone, substitute,
// and insert the clone before I.
// NOTE(review): original line 1767 (the signature start, likely
// `static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,`)
// and line 1771 (likely a call dropping poison-implying metadata from the
// clone) were dropped in extraction -- confirm against upstream.
1768 Value *NewOp, InstCombiner &IC) {
1769 Instruction *Clone = I.clone();
1770 Clone->replaceUsesOfWith(SI, NewOp);
1772 IC.InsertNewInstBefore(Clone, I.getIterator());
1773 return Clone;
1774}
1775
// Folds Op into its select operand SI, producing
// `select cond, (Op truearm), (Op falsearm)` when at least one arm (or both,
// if SimplifyBothArms) simplifies.
// NOTE(review): the signature start (original line 1776, likely
// `Instruction *InstCombinerImpl::foldOpIntoSelect(Instruction &Op,
//      SelectInst *SI,`) was dropped in extraction.
1777 bool FoldWithMultiUse,
1778 bool SimplifyBothArms) {
1779 // Don't modify shared select instructions unless set FoldWithMultiUse
1780 if (!SI->hasOneUser() && !FoldWithMultiUse)
1781 return nullptr;
1782
1783 Value *TV = SI->getTrueValue();
1784 Value *FV = SI->getFalseValue();
1785
1786 // Bool selects with constant operands can be folded to logical ops.
1787 if (SI->getType()->isIntOrIntVectorTy(1))
1788 return nullptr;
1789
1790 // Avoid breaking min/max reduction pattern,
1791 // which is necessary for vectorization later.
// NOTE(review): original line 1792 (a guard before this loop, presumably
// restricting the check to intrinsic/min-max operations) was dropped in
// extraction -- confirm against upstream.
1793 for (Value *IntrinOp : Op.operands())
1794 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1795 for (Value *PhiOp : PN->operands())
1796 if (PhiOp == &Op)
1797 return nullptr;
1798
1799 // Test if a FCmpInst instruction is used exclusively by a select as
1800 // part of a minimum or maximum operation. If so, refrain from doing
1801 // any other folding. This helps out other analyses which understand
1802 // non-obfuscated minimum and maximum idioms. And in this case, at
1803 // least one of the comparison operands has at least one user besides
1804 // the compare (the select), which would often largely negate the
1805 // benefit of folding anyway.
1806 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1807 if (CI->hasOneUse()) {
1808 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1809 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1810 !CI->isCommutative())
1811 return nullptr;
1812 }
1813 }
1814
1815 // Make sure that one of the select arms folds successfully.
1816 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1817 Value *NewFV =
1818 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1819 if (!NewTV && !NewFV)
1820 return nullptr;
1821
// Callers may demand that both arms simplify (stricter profitability).
1822 if (SimplifyBothArms && !(NewTV && NewFV))
1823 return nullptr;
1824
1825 // Create an instruction for the arm that did not fold.
1826 if (!NewTV)
1827 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1828 if (!NewFV)
1829 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1830 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1831}
1832
// Simplifies I as-if its phi operand PN were replaced by the value incoming
// from InBB (phi-translating the other operands into InBB as well).
// NOTE(review): the signature start (original line 1833, likely
// `static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,`)
// was dropped in extraction.
1834 Value *InValue, BasicBlock *InBB,
1835 const DataLayout &DL,
1836 const SimplifyQuery SQ) {
1837 // NB: It is a precondition of this transform that the operands be
1838 // phi translatable!
// NOTE(review): original line 1839 (likely `SmallVector<Value *> Ops;`) was
// dropped in extraction.
1840 for (Value *Op : I.operands()) {
1841 if (Op == PN)
1842 Ops.push_back(InValue);
1843 else
1844 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1845 }
1846
1847 // Don't consider the simplification successful if we get back a constant
1848 // expression. That's just an instruction in hiding.
1849 // Also reject the case where we simplify back to the phi node. We wouldn't
1850 // be able to remove it in that case.
// NOTE(review): original line 1851 (likely
// `Value *NewVal = simplifyInstructionWithOperands(`) was dropped in
// extraction; the next line is its argument list.
1852 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1853 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1854 return NewVal;
1855
1856 // Check if incoming PHI value can be replaced with constant
1857 // based on implied condition.
1858 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1859 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1860 if (TerminatorBI && TerminatorBI->isConditional() &&
1861 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
// Which edge we arrive on tells us the truth value of the branch condition.
1862 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1863 std::optional<bool> ImpliedCond = isImpliedCondition(
1864 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1865 DL, LHSIsTrue);
1866 if (ImpliedCond)
1867 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1868 }
1869
1870 return nullptr;
1871}
1872
1873/// In some cases it is beneficial to fold a select into a binary operator.
1874/// For example:
1875/// %1 = or %in, 4
1876/// %2 = select %cond, %1, %in
1877/// %3 = or %2, 1
1878/// =>
1879/// %1 = select i1 %cond, 5, 1
1880/// %2 = or %1, %in
// NOTE(review): the function's signature line is not visible in this listing;
// it evidently takes the outer binary operator as 'Op'.
1882 assert(Op.isAssociative() && "The operation must be associative!");
1883
1884 SelectInst *SI = dyn_cast<SelectInst>(Op.getOperand(0));
1885
// Require: operand 0 is a one-use select, operand 1 is an immediate constant,
// and the binop itself has one use, so no intermediate values are duplicated.
1886 Constant *Const;
1887 if (!SI || !match(Op.getOperand(1), m_ImmConstant(Const)) ||
1888 !Op.hasOneUse() || !SI->hasOneUse())
1889 return nullptr;
1890
1891 Value *TV = SI->getTrueValue();
1892 Value *FV = SI->getFalseValue();
1893 Value *Input, *NewTV, *NewFV;
1894 Constant *Const2;
1895
// One select arm must be "other-arm Op constant"; combine the two constants
// so the select chooses between constants and the binop applies to Input.
1896 if (TV->hasOneUse() && match(TV, m_BinOp(Op.getOpcode(), m_Specific(FV),
1897 m_ImmConstant(Const2)))) {
1898 NewTV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1899 NewFV = Const;
1900 Input = FV;
1901 } else if (FV->hasOneUse() &&
1902 match(FV, m_BinOp(Op.getOpcode(), m_Specific(TV),
1903 m_ImmConstant(Const2)))) {
1904 NewTV = Const;
1905 NewFV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1906 Input = TV;
1907 } else
1908 return nullptr;
1909
// Constant folding can fail (returns null); bail out rather than emit a
// non-constant arm.
1910 if (!NewTV || !NewFV)
1911 return nullptr;
1912
1913 Value *NewSI =
1914 Builder.CreateSelect(SI->getCondition(), NewTV, NewFV, "",
1915 ProfcheckDisableMetadataFixes ? nullptr : SI);
1916 return BinaryOperator::Create(Op.getOpcode(), NewSI, Input);
1917}
1918
// Fold instruction I into PHI node PN by performing the operation on each
// incoming value and building a new PHI of the results. Requires that the
// operation simplifies for (almost) every incoming edge.
// NOTE(review): the signature's first line is elided in this listing; known
// interface is foldOpIntoPhi(Instruction &I, PHINode *PN, bool
// AllowMultipleUses) — confirm against the full source.
1920 bool AllowMultipleUses) {
1921 unsigned NumPHIValues = PN->getNumIncomingValues();
1922 if (NumPHIValues == 0)
1923 return nullptr;
1924
1925 // We normally only transform phis with a single use. However, if a PHI has
1926 // multiple uses and they are all the same operation, we can fold *all* of the
1927 // uses into the PHI.
1928 bool OneUse = PN->hasOneUse();
1929 bool IdenticalUsers = false;
1930 if (!AllowMultipleUses && !OneUse) {
1931 // Walk the use list for the instruction, comparing them to I.
1932 for (User *U : PN->users()) {
// NOTE(review): the line casting U to an Instruction* ('UI') is elided here.
1934 if (UI != &I && !I.isIdenticalTo(UI))
1935 return nullptr;
1936 }
1937 // Otherwise, we can replace *all* users with the new PHI we form.
1938 IdenticalUsers = true;
1939 }
1940
1941 // Check that all operands are phi-translatable.
1942 for (Value *Op : I.operands()) {
1943 if (Op == PN)
1944 continue;
1945
1946 // Non-instructions never require phi-translation.
1947 auto *I = dyn_cast<Instruction>(Op);
1948 if (!I)
1949 continue;
1950
1951 // Phi-translate can handle phi nodes in the same block.
1952 if (isa<PHINode>(I))
1953 if (I->getParent() == PN->getParent())
1954 continue;
1955
1956 // Operand dominates the block, no phi-translation necessary.
1957 if (DT.dominates(I, PN->getParent()))
1958 continue;
1959
1960 // Not phi-translatable, bail out.
1961 return nullptr;
1962 }
1963
1964 // Check to see whether the instruction can be folded into each phi operand.
1965 // If there is one operand that does not fold, remember the BB it is in.
1966 SmallVector<Value *> NewPhiValues;
1967 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1968 bool SeenNonSimplifiedInVal = false;
1969 for (unsigned i = 0; i != NumPHIValues; ++i) {
1970 Value *InVal = PN->getIncomingValue(i);
1971 BasicBlock *InBB = PN->getIncomingBlock(i);
1972
1973 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1974 NewPhiValues.push_back(NewVal);
1975 continue;
1976 }
1977
1978 // Handle some cases that can't be fully simplified, but where we know that
1979 // the two instructions will fold into one.
1980 auto WillFold = [&]() {
1981 if (!InVal->hasUseList() || !InVal->hasOneUser())
1982 return false;
1983
1984 // icmp of ucmp/scmp with constant will fold to icmp.
1985 const APInt *Ignored;
1986 if (isa<CmpIntrinsic>(InVal) &&
1987 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1988 return true;
1989
1990 // icmp eq zext(bool), 0 will fold to !bool.
// NOTE(review): the matcher continuation line (original line 1994) is elided.
1991 if (isa<ZExtInst>(InVal) &&
1992 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1993 match(&I,
1995 return true;
1996
1997 return false;
1998 };
1999
2000 if (WillFold()) {
2001 OpsToMoveUseToIncomingBB.push_back(i);
2002 NewPhiValues.push_back(nullptr);
2003 continue;
2004 }
2005
2006 if (!OneUse && !IdenticalUsers)
2007 return nullptr;
2008
2009 if (SeenNonSimplifiedInVal)
2010 return nullptr; // More than one non-simplified value.
2011 SeenNonSimplifiedInVal = true;
2012
2013 // If there is exactly one non-simplified value, we can insert a copy of the
2014 // operation in that block. However, if this is a critical edge, we would
2015 // be inserting the computation on some other paths (e.g. inside a loop).
2016 // Only do this if the pred block is unconditionally branching into the phi
2017 // block. Also, make sure that the pred block is not dead code.
// NOTE(review): the line obtaining InBB's terminator as BranchInst* 'BI'
// (original line 2018) is elided here.
2019 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
2020 return nullptr;
2021
2022 NewPhiValues.push_back(nullptr);
2023 OpsToMoveUseToIncomingBB.push_back(i);
2024
2025 // Do not push the operation across a loop backedge. This could result in
2026 // an infinite combine loop, and is generally non-profitable (especially
2027 // if the operation was originally outside the loop).
2028 if (isBackEdge(InBB, PN->getParent()))
2029 return nullptr;
2030 }
2031
2032 // Clone the instruction that uses the phi node and move it into the incoming
2033 // BB because we know that the next iteration of InstCombine will simplify it.
// NOTE(review): the declaration of the 'Clones' map (original line 2034) and
// the per-iteration incoming-value lookup 'Op' (line 2036) are elided here.
2035 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
2037 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
2038
2039 Instruction *Clone = Clones.lookup(OpBB);
2040 if (!Clone) {
2041 Clone = I.clone();
2042 for (Use &U : Clone->operands()) {
2043 if (U == PN)
2044 U = Op;
2045 else
2046 U = U->DoPHITranslation(PN->getParent(), OpBB);
2047 }
2048 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
2049 Clones.insert({OpBB, Clone});
2050 // We may have speculated the instruction.
2052 }
2053
2054 NewPhiValues[OpIndex] = Clone;
2055 }
2056
2057 // Okay, we can do the transformation: create the new PHI node.
2058 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2059 InsertNewInstBefore(NewPN, PN->getIterator());
2060 NewPN->takeName(PN);
2061 NewPN->setDebugLoc(PN->getDebugLoc());
2062
2063 for (unsigned i = 0; i != NumPHIValues; ++i)
2064 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2065
2066 if (IdenticalUsers) {
2067 // Collect and deduplicate users up-front to avoid iterator invalidation.
// NOTE(review): the declaration of 'ToReplace' (original line 2068) and the
// cast of U to Instruction* 'User' (line 2070) are elided here.
2069 for (User *U : PN->users()) {
2071 if (User == &I)
2072 continue;
2073 ToReplace.insert(User);
2074 }
2075 for (Instruction *I : ToReplace) {
2076 replaceInstUsesWith(*I, NewPN);
2078 }
2079 OneUse = true;
2080 }
2081
2082 if (OneUse) {
2083 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2084 }
2085 return replaceInstUsesWith(I, NewPN);
2086}
2087
// Combine two interleaved simple recurrences joined by an associative,
// commutative binop into a single recurrence with constant-folded start and
// step values, e.g. (phi0 op c0) op (phi1 op c1) --> phi op (c0 op c1).
// NOTE(review): the signature line (taking BinaryOperator &BO) is elided in
// this listing.
2089 if (!BO.isAssociative())
2090 return nullptr;
2091
2092 // Find the interleaved binary ops.
2093 auto Opc = BO.getOpcode();
2094 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2095 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
// Each recurrence binop must have exactly two uses: its phi and BO itself.
2096 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2097 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2098 !BO0->isAssociative() || !BO1->isAssociative() ||
2099 BO0->getParent() != BO1->getParent())
2100 return nullptr;
2101
2102 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2103 "Expected commutative instructions!");
2104
2105 // Find the matching phis, forming the recurrences.
2106 PHINode *PN0, *PN1;
2107 Value *Start0, *Step0, *Start1, *Step1;
2108 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2109 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2110 PN0->getParent() != PN1->getParent())
2111 return nullptr;
2112
2113 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2114 "Expected PHIs with two incoming values!");
2115
2116 // Convert the start and step values to constants.
2117 auto *Init0 = dyn_cast<Constant>(Start0);
2118 auto *Init1 = dyn_cast<Constant>(Start1);
2119 auto *C0 = dyn_cast<Constant>(Step0);
2120 auto *C1 = dyn_cast<Constant>(Step1);
2121 if (!Init0 || !Init1 || !C0 || !C1)
2122 return nullptr;
2123
2124 // Fold the recurrence constants.
2125 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2126 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2127 if (!Init || !C)
2128 return nullptr;
2129
2130 // Create the reduced PHI.
2131 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2132 "reduced.phi");
2133
2134 // Create the new binary op.
2135 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2136 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2137 // Intersect FMF flags for FADD and FMUL.
2138 FastMathFlags Intersect = BO0->getFastMathFlags() &
2139 BO1->getFastMathFlags() & BO.getFastMathFlags();
2140 NewBO->setFastMathFlags(Intersect);
2141 } else {
// Merge nuw/nsw-style flags conservatively across all three binops.
2142 OverflowTracking Flags;
2143 Flags.AllKnownNonNegative = false;
2144 Flags.AllKnownNonZero = false;
2145 Flags.mergeFlags(*BO0);
2146 Flags.mergeFlags(*BO1);
2147 Flags.mergeFlags(BO);
2148 Flags.applyFlags(*NewBO);
2149 }
2150 NewBO->takeName(&BO);
2151
// Wire the new phi using PN0's edges; assert PN1's edges agree block-wise.
2152 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2153 auto *V = PN0->getIncomingValue(I);
2154 auto *BB = PN0->getIncomingBlock(I);
2155 if (V == Init0) {
2156 assert(((PN1->getIncomingValue(0) == Init1 &&
2157 PN1->getIncomingBlock(0) == BB) ||
2158 (PN1->getIncomingValue(1) == Init1 &&
2159 PN1->getIncomingBlock(1) == BB)) &&
2160 "Invalid incoming block!");
2161 NewPN->addIncoming(Init, BB);
2162 } else if (V == BO0) {
2163 assert(((PN1->getIncomingValue(0) == BO1 &&
2164 PN1->getIncomingBlock(0) == BB) ||
2165 (PN1->getIncomingValue(1) == BO1 &&
2166 PN1->getIncomingBlock(1) == BB)) &&
2167 "Invalid incoming block!");
2168 NewPN->addIncoming(NewBO, BB);
2169 } else
2170 llvm_unreachable("Unexpected incoming value!");
2171 }
2172
2173 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2174 << "\n with " << *PN1 << "\n " << *BO1
2175 << '\n');
2176
2177 // Insert the new recurrence and remove the old (dead) ones.
2178 InsertNewInstWith(NewPN, PN0->getIterator());
2179 InsertNewInstWith(NewBO, BO0->getIterator());
2180
// NOTE(review): the statements erasing the now-dead old phis/binops
// (original lines 2181-2186) are elided in this listing.
2188 return replaceInstUsesWith(BO, NewBO);
2189}
2190
// Fold a binop whose two operands are single-use phis from the same block:
// either via an identity constant that lets incoming values pass through, or
// by hoisting the binop into the lone unconditional predecessor and folding
// the constant incoming pair.
// NOTE(review): the signature line (taking BinaryOperator &BO) is elided in
// this listing.
2192 // Attempt to fold binary operators whose operands are simple recurrences.
2193 if (auto *NewBO = foldBinopWithRecurrence(BO))
2194 return NewBO;
2195
2196 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2197 // we are guarding against replicating the binop in >1 predecessor.
2198 // This could miss matching a phi with 2 constant incoming values.
2199 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2200 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2201 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2202 Phi0->getNumOperands() != Phi1->getNumOperands())
2203 return nullptr;
2204
2205 // TODO: Remove the restriction for binop being in the same block as the phis.
2206 if (BO.getParent() != Phi0->getParent() ||
2207 BO.getParent() != Phi1->getParent())
2208 return nullptr;
2209
2210 // Fold if there is at least one specific constant value in phi0 or phi1's
2211 // incoming values that comes from the same block and this specific constant
2212 // value can be used to do optimization for specific binary operator.
2213 // For example:
2214 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2215 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2216 // %add = add i32 %phi0, %phi1
2217 // ==>
2218 // %add = phi i32 [%j, %bb0], [%i, %bb1]
// NOTE(review): the line computing the operation's identity constant 'C'
// (original line 2219) is elided here.
2220 /*AllowRHSConstant*/ false);
2221 if (C) {
2222 SmallVector<Value *, 4> NewIncomingValues;
2223 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2224 auto &Phi0Use = std::get<0>(T);
2225 auto &Phi1Use = std::get<1>(T);
2226 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2227 return false;
2228 Value *Phi0UseV = Phi0Use.get();
2229 Value *Phi1UseV = Phi1Use.get();
// Exactly one side must be the identity constant; keep the other side.
2230 if (Phi0UseV == C)
2231 NewIncomingValues.push_back(Phi1UseV);
2232 else if (Phi1UseV == C)
2233 NewIncomingValues.push_back(Phi0UseV);
2234 else
2235 return false;
2236 return true;
2237 };
2238
2239 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2240 CanFoldIncomingValuePair)) {
2241 PHINode *NewPhi =
2242 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2243 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2244 "The number of collected incoming values should equal the number "
2245 "of the original PHINode operands!");
2246 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2247 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2248 return NewPhi;
2249 }
2250 }
2251
2252 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2253 return nullptr;
2254
2255 // Match a pair of incoming constants for one of the predecessor blocks.
2256 BasicBlock *ConstBB, *OtherBB;
2257 Constant *C0, *C1;
2258 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2259 ConstBB = Phi0->getIncomingBlock(0);
2260 OtherBB = Phi0->getIncomingBlock(1);
2261 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2262 ConstBB = Phi0->getIncomingBlock(1);
2263 OtherBB = Phi0->getIncomingBlock(0);
2264 } else {
2265 return nullptr;
2266 }
2267 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2268 return nullptr;
2269
2270 // The block that we are hoisting to must reach here unconditionally.
2271 // Otherwise, we could be speculatively executing an expensive or
2272 // non-speculative op.
2273 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2274 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2275 !DT.isReachableFromEntry(OtherBB))
2276 return nullptr;
2277
2278 // TODO: This check could be tightened to only apply to binops (div/rem) that
2279 // are not safe to speculatively execute. But that could allow hoisting
2280 // potentially expensive instructions (fdiv for example).
// NOTE(review): the loop-body condition line (original line 2282, presumably
// bailing on instructions that may have side effects before BO) is elided.
2281 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2283 return nullptr;
2284
2285 // Fold constants for the predecessor block with constant incoming values.
2286 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2287 if (!NewC)
2288 return nullptr;
2289
2290 // Make a new binop in the predecessor block with the non-constant incoming
2291 // values.
2292 Builder.SetInsertPoint(PredBlockBranch);
2293 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2294 Phi0->getIncomingValueForBlock(OtherBB),
2295 Phi1->getIncomingValueForBlock(OtherBB));
2296 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2297 NotFoldedNewBO->copyIRFlags(&BO);
2298
2299 // Replace the binop with a phi of the new values. The old phis are dead.
2300 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2301 NewPhi->addIncoming(NewBO, OtherBB);
2302 NewPhi->addIncoming(NewC, ConstBB);
2303 return NewPhi;
2304}
2305
// Try to fold instruction I into a select or phi appearing as either operand.
// NOTE(review): the signature line is elided in this listing; the body shows
// it takes an instruction 'I' with two operands.
2307 auto TryFoldOperand = [&](unsigned OpIdx,
2308 bool IsOtherParamConst) -> Instruction * {
2309 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(OpIdx)))
2310 return FoldOpIntoSelect(I, Sel, false, !IsOtherParamConst);
2311 if (auto *PN = dyn_cast<PHINode>(I.getOperand(OpIdx)))
2312 return foldOpIntoPhi(I, PN);
2313 return nullptr;
2314 };
2315
// Try operand 0 first, then operand 1; pass whether the other operand is a
// constant so the select fold can require both arms to simplify otherwise.
2316 if (Instruction *NewI =
2317 TryFoldOperand(/*OpIdx=*/0, isa<Constant>(I.getOperand(1))))
2318 return NewI;
2319 return TryFoldOperand(/*OpIdx=*/1, isa<Constant>(I.getOperand(0)));
2320}
2321
// Decide whether it is profitable to merge GEP with its GEP operand Src.
// NOTE(review): the signature line is elided in this listing.
2323 // If this GEP has only 0 indices, it is the same pointer as
2324 // Src. If Src is not a trivial GEP too, don't combine
2325 // the indices.
2326 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2327 !Src.hasOneUse())
2328 return false;
2329 return true;
2330}
2331
2332/// Find a constant NewC that has property:
2333/// shuffle(NewC, ShMask) = C
2334/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2335///
2336/// A 1-to-1 mapping is not required. Example:
2337/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
// NOTE(review): the signature's first line is elided in this listing; it
// evidently takes the shuffle mask 'ShMask', constant 'C', and result type.
2339 VectorType *NewCTy) {
// Scalable vectors can only be handled when C is a splat (per-lane mapping
// is not expressible at compile time).
2340 if (isa<ScalableVectorType>(NewCTy)) {
2341 Constant *Splat = C->getSplatValue();
2342 if (!Splat)
2343 return nullptr;
// NOTE(review): the line returning the re-splatted constant (original line
// 2344) is elided here.
2345 }
2346
2347 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2348 cast<FixedVectorType>(C->getType())->getNumElements())
2349 return nullptr;
2350
// Start from an all-poison vector and scatter C's elements to the source
// lanes named by the mask.
2351 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2352 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2353 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2354 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2355 for (unsigned I = 0; I < NumElts; ++I) {
2356 Constant *CElt = C->getAggregateElement(I);
2357 if (ShMask[I] >= 0) {
2358 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2359 Constant *NewCElt = NewVecC[ShMask[I]];
2360 // Bail out if:
2361 // 1. The constant vector contains a constant expression.
2362 // 2. The shuffle needs an element of the constant vector that can't
2363 // be mapped to a new constant vector.
2364 // 3. This is a widening shuffle that copies elements of V1 into the
2365 // extended elements (extending with poison is allowed).
2366 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2367 I >= NewCNumElts)
2368 return nullptr;
2369 NewVecC[ShMask[I]] = CElt;
2370 }
2371 }
2372 return ConstantVector::get(NewVecC);
2373}
2374
2375// Get the result of `Vector Op Splat` (or Splat Op Vector if \p SplatLHS).
// NOTE(review): the signature's first line and the construction of the splat
// LHS constant (original lines 2376 and 2380) are elided in this listing.
2377 Constant *Splat, bool SplatLHS,
2378 const DataLayout &DL) {
2379 ElementCount EC = cast<VectorType>(Vector->getType())->getElementCount();
2381 Constant *RHS = Vector;
// Default is Splat-on-LHS; swap to put the vector first when !SplatLHS.
2382 if (!SplatLHS)
2383 std::swap(LHS, RHS);
2384 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
2385}
2386
// Push a binop through matching vector.splice-style intrinsics (the intrinsic
// ID is the template parameter), so the splice is performed once on the
// combined result: Op(splice(V1,poison,off), splice(V2,poison,off)) ->
// splice(Op(V1,V2), poison, off), plus splat-operand variants.
// NOTE(review): the signature line and the splice matcher lines (original
// lines 2388, 2401, 2404, 2418) are elided in this listing.
2387 template <Intrinsic::ID SpliceID>
2389 InstCombiner::BuilderTy &Builder) {
2390 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2391 auto CreateBinOpSplice = [&](Value *X, Value *Y, Value *Offset) {
2392 Value *V = Builder.CreateBinOp(Inst.getOpcode(), X, Y, Inst.getName());
2393 if (auto *BO = dyn_cast<BinaryOperator>(V))
2394 BO->copyIRFlags(&Inst);
2395 Module *M = Inst.getModule();
2396 Function *F = Intrinsic::getOrInsertDeclaration(M, SpliceID, V->getType());
2397 return CallInst::Create(F, {V, PoisonValue::get(V->getType()), Offset});
2398 };
2399 Value *V1, *V2, *Offset;
2400 if (match(LHS,
2402 // Op(splice(V1, poison, offset), splice(V2, poison, offset))
2403 // -> splice(Op(V1, V2), poison, offset)
2405 m_Specific(Offset))) &&
2406 (LHS->hasOneUse() || RHS->hasOneUse() ||
2407 (LHS == RHS && LHS->hasNUses(2))))
2408 return CreateBinOpSplice(V1, V2, Offset);
2409
2410 // Op(splice(V1, poison, offset), RHSSplat)
2411 // -> splice(Op(V1, RHSSplat), poison, offset)
2412 if (LHS->hasOneUse() && isSplatValue(RHS))
2413 return CreateBinOpSplice(V1, RHS, Offset);
2414 }
2415 // Op(LHSSplat, splice(V2, poison, offset))
2416 // -> splice(Op(LHSSplat, V2), poison, offset)
2417 else if (isSplatValue(LHS) &&
2419 m_Value(Offset)))))
2420 return CreateBinOpSplice(LHS, V2, Offset);
2421
2422 // TODO: Fold binops of the form
2423 // Op(splice(poison, V1, offset), splice(poison, V2, offset))
2424 // -> splice(poison, Op(V1, V2), offset)
2425
2426 return nullptr;
2427}
2428
// Canonicalize vector binops by moving shuffles/reverses/splices/splats
// after the operation, so shuffles cluster with shuffles and binops with
// binops, enabling further folds.
// NOTE(review): the signature line (taking BinaryOperator &Inst) and several
// matcher lines are elided in this listing; elisions are marked below.
2430 if (!isa<VectorType>(Inst.getType()))
2431 return nullptr;
2432
2433 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2434 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2435 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2436 cast<VectorType>(Inst.getType())->getElementCount());
2437 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2438 cast<VectorType>(Inst.getType())->getElementCount());
2439
2440 auto foldConstantsThroughSubVectorInsertSplat =
2441 [&](Value *MaybeSubVector, Value *MaybeSplat,
2442 bool SplatLHS) -> Instruction * {
2443 Value *Idx;
2444 Constant *Splat, *SubVector, *Dest;
2445 if (!match(MaybeSplat, m_ConstantSplat(m_Constant(Splat))) ||
2446 !match(MaybeSubVector,
2447 m_VectorInsert(m_Constant(Dest), m_Constant(SubVector),
2448 m_Value(Idx))))
2449 return nullptr;
2450 SubVector =
2451 constantFoldBinOpWithSplat(Opcode, SubVector, Splat, SplatLHS, DL);
2452 Dest = constantFoldBinOpWithSplat(Opcode, Dest, Splat, SplatLHS, DL);
2453 if (!SubVector || !Dest)
2454 return nullptr;
2455 auto *InsertVector =
2456 Builder.CreateInsertVector(Dest->getType(), Dest, SubVector, Idx);
2457 return replaceInstUsesWith(Inst, InsertVector);
2458 };
2459
2460 // If one operand is a constant splat and the other operand is a
2461 // `vector.insert` where both the destination and subvector are constant,
2462 // apply the operation to both the destination and subvector, returning a new
2463 // constant `vector.insert`. This helps constant folding for scalable vectors.
2464 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2465 /*MaybeSubVector=*/LHS, /*MaybeSplat=*/RHS, /*SplatLHS=*/false))
2466 return Folded;
2467 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2468 /*MaybeSubVector=*/RHS, /*MaybeSplat=*/LHS, /*SplatLHS=*/true))
2469 return Folded;
2470
2471 // If both operands of the binop are vector concatenations, then perform the
2472 // narrow binop on each pair of the source operands followed by concatenation
2473 // of the results.
2474 Value *L0, *L1, *R0, *R1;
2475 ArrayRef<int> Mask;
2476 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2477 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2478 LHS->hasOneUse() && RHS->hasOneUse() &&
2479 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2480 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2481 // This transform does not have the speculative execution constraint as
2482 // below because the shuffle is a concatenation. The new binops are
2483 // operating on exactly the same elements as the existing binop.
2484 // TODO: We could ease the mask requirement to allow different undef lanes,
2485 // but that requires an analysis of the binop-with-undef output value.
2486 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2487 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2488 BO->copyIRFlags(&Inst);
2489 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2490 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2491 BO->copyIRFlags(&Inst);
2492 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2493 }
2494
2495 auto createBinOpReverse = [&](Value *X, Value *Y) {
2496 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2497 if (auto *BO = dyn_cast<BinaryOperator>(V))
2498 BO->copyIRFlags(&Inst);
2499 Module *M = Inst.getModule();
// NOTE(review): the intrinsic-declaration lookup line (original line 2500)
// is elided here.
2501 M, Intrinsic::vector_reverse, V->getType());
2502 return CallInst::Create(F, V);
2503 };
2504
2505 // NOTE: Reverse shuffles don't require the speculative execution protection
2506 // below because they don't affect which lanes take part in the computation.
2507
2508 Value *V1, *V2;
2509 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2510 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2511 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2512 (LHS->hasOneUse() || RHS->hasOneUse() ||
2513 (LHS == RHS && LHS->hasNUses(2))))
2514 return createBinOpReverse(V1, V2);
2515
2516 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2517 if (LHS->hasOneUse() && isSplatValue(RHS))
2518 return createBinOpReverse(V1, RHS);
2519 }
2520 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2521 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2522 return createBinOpReverse(LHS, V2);
2523
2524 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2525 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2526 if (auto *BO = dyn_cast<BinaryOperator>(V))
2527 BO->copyIRFlags(&Inst);
2528
2529 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2530 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2531 Module *M = Inst.getModule();
// NOTE(review): the intrinsic-declaration lookup line (original line 2532)
// is elided here.
2533 M, Intrinsic::experimental_vp_reverse, V->getType());
2534 return CallInst::Create(F, {V, AllTrueMask, EVL});
2535 };
2536
// Same set of folds for vp.reverse with an all-ones mask and matching EVL.
// NOTE(review): the vp.reverse matcher lines (original lines 2538, 2541,
// 2553) are elided here.
2537 Value *EVL;
2539 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2540 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2542 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2543 (LHS->hasOneUse() || RHS->hasOneUse() ||
2544 (LHS == RHS && LHS->hasNUses(2))))
2545 return createBinOpVPReverse(V1, V2, EVL);
2546
2547 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2548 if (LHS->hasOneUse() && isSplatValue(RHS))
2549 return createBinOpVPReverse(V1, RHS, EVL);
2550 }
2551 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2552 else if (isSplatValue(LHS) &&
2554 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2555 return createBinOpVPReverse(LHS, V2, EVL);
2556
// NOTE(review): the calls into the splice-fold helper (original lines 2558,
// 2561, presumably instantiating it for the splice intrinsic IDs) are elided.
2557 if (Instruction *Folded =
2559 return Folded;
2560 if (Instruction *Folded =
2562 return Folded;
2563
2564 // It may not be safe to reorder shuffles and things like div, urem, etc.
2565 // because we may trap when executing those ops on unknown vector elements.
2566 // See PR20059.
// NOTE(review): the safety-check condition (original line 2567, presumably
// isSafeToSpeculativelyExecute(&Inst)) is elided here.
2568 return nullptr;
2569
2570 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2571 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2572 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2573 BO->copyIRFlags(&Inst);
2574 return new ShuffleVectorInst(XY, M);
2575 };
2576
2577 // If both arguments of the binary operation are shuffles that use the same
2578 // mask and shuffle within a single vector, move the shuffle after the binop.
2579 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2580 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2581 V1->getType() == V2->getType() &&
2582 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2583 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2584 return createBinOpShuffle(V1, V2, Mask);
2585 }
2586
2587 // If both arguments of a commutative binop are select-shuffles that use the
2588 // same mask with commuted operands, the shuffles are unnecessary.
2589 if (Inst.isCommutative() &&
2590 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2591 match(RHS,
2592 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2593 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2594 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2595 // TODO: Allow shuffles that contain undefs in the mask?
2596 // That is legal, but it reduces undef knowledge.
2597 // TODO: Allow arbitrary shuffles by shuffling after binop?
2598 // That might be legal, but we have to deal with poison.
2599 if (LShuf->isSelect() &&
2600 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2601 RShuf->isSelect() &&
2602 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2603 // Example:
2604 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2605 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2606 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2607 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2608 NewBO->copyIRFlags(&Inst);
2609 return NewBO;
2610 }
2611 }
2612
2613 // If one argument is a shuffle within one vector and the other is a constant,
2614 // try moving the shuffle after the binary operation. This canonicalization
2615 // intends to move shuffles closer to other shuffles and binops closer to
2616 // other binops, so they can be folded. It may also enable demanded elements
2617 // transforms.
// NOTE(review): the commutative matcher head lines (original line 2619) and
// the unshuffleConstant call (line 2627) are elided here.
2618 Constant *C;
2620 m_Mask(Mask))),
2621 m_ImmConstant(C)))) {
2622 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2623 "Shuffle should not change scalar type");
2624
2625 bool ConstOp1 = isa<Constant>(RHS);
2626 if (Constant *NewC =
2628 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2629 // which will cause UB for div/rem. Mask them with a safe constant.
2630 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2631 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2632
2633 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2634 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2635 Value *NewLHS = ConstOp1 ? V1 : NewC;
2636 Value *NewRHS = ConstOp1 ? NewC : V1;
2637 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2638 }
2639 }
2640
2641 // Try to reassociate to sink a splat shuffle after a binary operation.
2642 if (Inst.isAssociative() && Inst.isCommutative()) {
2643 // Canonicalize shuffle operand as LHS.
2644 if (isa<ShuffleVectorInst>(RHS))
2645 std::swap(LHS, RHS);
2646
2647 Value *X;
2648 ArrayRef<int> MaskC;
2649 int SplatIndex;
2650 Value *Y, *OtherOp;
2651 if (!match(LHS,
2652 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2653 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2654 X->getType() != Inst.getType() ||
2655 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2656 return nullptr;
2657
2658 // FIXME: This may not be safe if the analysis allows undef elements. By
2659 // moving 'Y' before the splat shuffle, we are implicitly assuming
2660 // that it is not undef/poison at the splat index.
2661 if (isSplatValue(OtherOp, SplatIndex)) {
2662 std::swap(Y, OtherOp);
2663 } else if (!isSplatValue(Y, SplatIndex)) {
2664 return nullptr;
2665 }
2666
2667 // X and Y are splatted values, so perform the binary operation on those
2668 // values followed by a splat followed by the 2nd binary operation:
2669 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2670 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2671 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2672 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2673 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2674
2675 // Intersect FMF on both new binops. Other (poison-generating) flags are
2676 // dropped to be safe.
2677 if (isa<FPMathOperator>(R)) {
2678 R->copyFastMathFlags(&Inst);
2679 R->andIRFlags(RHS);
2680 }
2681 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2682 NewInstBO->copyIRFlags(R);
2683 return R;
2684 }
2685
2686 return nullptr;
2687}
2688
2689/// Try to narrow the width of a binop if at least 1 operand is an extend of
2690/// of a value. This requires a potentially expensive known bits check to make
2691/// sure the narrow op does not overflow.
2692Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2693 // We need at least one extended operand.
2694 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2695
2696 // If this is a sub, we swap the operands since we always want an extension
2697 // on the RHS. The LHS can be an extension or a constant.
2698 if (BO.getOpcode() == Instruction::Sub)
2699 std::swap(Op0, Op1);
2700
2701 Value *X;
2702 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2703 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2704 return nullptr;
2705
2706 // If both operands are the same extension from the same source type and we
2707 // can eliminate at least one (hasOneUse), this might work.
2708 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2709 Value *Y;
2710 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2711 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2712 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2713 // If that did not match, see if we have a suitable constant operand.
2714 // Truncating and extending must produce the same constant.
2715 Constant *WideC;
2716 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2717 return nullptr;
2718 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2719 if (!NarrowC)
2720 return nullptr;
2721 Y = NarrowC;
2722 }
2723
2724 // Swap back now that we found our operands.
2725 if (BO.getOpcode() == Instruction::Sub)
2726 std::swap(X, Y);
2727
2728 // Both operands have narrow versions. Last step: the math must not overflow
2729 // in the narrow width.
2730 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2731 return nullptr;
2732
2733 // bo (ext X), (ext Y) --> ext (bo X, Y)
2734 // bo (ext X), C --> ext (bo X, C')
2735 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2736 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2737 if (IsSext)
2738 NewBinOp->setHasNoSignedWrap();
2739 else
2740 NewBinOp->setHasNoUnsignedWrap();
2741 }
2742 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2743}
2744
2745/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2746/// transform.
2751
2752/// Thread a GEP operation with constant indices through the constant true/false
2753/// arms of a select.
2755 InstCombiner::BuilderTy &Builder) {
2756 if (!GEP.hasAllConstantIndices())
2757 return nullptr;
2758
2759 Instruction *Sel;
2760 Value *Cond;
2761 Constant *TrueC, *FalseC;
2762 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2763 !match(Sel,
2764 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2765 return nullptr;
2766
2767 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2768 // Propagate 'inbounds' and metadata from existing instructions.
2769 // Note: using IRBuilder to create the constants for efficiency.
2770 SmallVector<Value *, 4> IndexC(GEP.indices());
2771 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2772 Type *Ty = GEP.getSourceElementType();
2773 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2774 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2775 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2776}
2777
2778// Canonicalization:
2779// gep T, (gep i8, base, C1), (Index + C2) into
2780// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
2782 GEPOperator *Src,
2783 InstCombinerImpl &IC) {
2784 if (GEP.getNumIndices() != 1)
2785 return nullptr;
2786 auto &DL = IC.getDataLayout();
2787 Value *Base;
2788 const APInt *C1;
2789 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2790 return nullptr;
2791 Value *VarIndex;
2792 const APInt *C2;
2793 Type *PtrTy = Src->getType()->getScalarType();
2794 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2795 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2796 return nullptr;
2797 if (C1->getBitWidth() != IndexSizeInBits ||
2798 C2->getBitWidth() != IndexSizeInBits)
2799 return nullptr;
2800 Type *BaseType = GEP.getSourceElementType();
2802 return nullptr;
2803 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2804 APInt NewOffset = TypeSize * *C2 + *C1;
2805 if (NewOffset.isZero() ||
2806 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2808 if (GEP.hasNoUnsignedWrap() &&
2809 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2810 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2812 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2813 Flags |= GEPNoWrapFlags::inBounds();
2814 }
2815
2816 Value *GEPConst =
2817 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2818 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2819 }
2820
2821 return nullptr;
2822}
2823
2824/// Combine constant offsets separated by variable offsets.
2825/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
2827 InstCombinerImpl &IC) {
2828 if (!GEP.hasAllConstantIndices())
2829 return nullptr;
2830
2833 auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2834 while (true) {
2835 if (!InnerGEP)
2836 return nullptr;
2837
2838 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2839 if (InnerGEP->hasAllConstantIndices())
2840 break;
2841
2842 if (!InnerGEP->hasOneUse())
2843 return nullptr;
2844
2845 Skipped.push_back(InnerGEP);
2846 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2847 }
2848
2849 // The two constant offset GEPs are directly adjacent: Let normal offset
2850 // merging handle it.
2851 if (Skipped.empty())
2852 return nullptr;
2853
2854 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2855 // if profitable.
2856 if (!InnerGEP->hasOneUse())
2857 return nullptr;
2858
2859 // Don't bother with vector splats.
2860 Type *Ty = GEP.getType();
2861 if (InnerGEP->getType() != Ty)
2862 return nullptr;
2863
2864 const DataLayout &DL = IC.getDataLayout();
2865 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2866 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2867 !InnerGEP->accumulateConstantOffset(DL, Offset))
2868 return nullptr;
2869
2870 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2871 for (GetElementPtrInst *SkippedGEP : Skipped)
2872 SkippedGEP->setNoWrapFlags(NW);
2873
2874 return IC.replaceInstUsesWith(
2875 GEP,
2876 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2877 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2878}
2879
2881 GEPOperator *Src) {
2882 // Combine Indices - If the source pointer to this getelementptr instruction
2883 // is a getelementptr instruction with matching element type, combine the
2884 // indices of the two getelementptr instructions into a single instruction.
2885 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2886 return nullptr;
2887
2888 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2889 return I;
2890
2891 if (auto *I = combineConstantOffsets(GEP, *this))
2892 return I;
2893
2894 if (Src->getResultElementType() != GEP.getSourceElementType())
2895 return nullptr;
2896
2897 // Fold chained GEP with constant base into single GEP:
2898 // gep i8, (gep i8, %base, C1), (select Cond, C2, C3)
2899 // -> gep i8, %base, (select Cond, C1+C2, C1+C3)
2900 if (Src->hasOneUse() && GEP.getNumIndices() == 1 &&
2901 Src->getNumIndices() == 1) {
2902 Value *SrcIdx = *Src->idx_begin();
2903 Value *GEPIdx = *GEP.idx_begin();
2904 const APInt *ConstOffset, *TrueVal, *FalseVal;
2905 Value *Cond;
2906
2907 if ((match(SrcIdx, m_APInt(ConstOffset)) &&
2908 match(GEPIdx,
2909 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal)))) ||
2910 (match(GEPIdx, m_APInt(ConstOffset)) &&
2911 match(SrcIdx,
2912 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal))))) {
2913 auto *Select = isa<SelectInst>(GEPIdx) ? cast<SelectInst>(GEPIdx)
2914 : cast<SelectInst>(SrcIdx);
2915
2916 // Make sure the select has only one use.
2917 if (!Select->hasOneUse())
2918 return nullptr;
2919
2920 if (TrueVal->getBitWidth() != ConstOffset->getBitWidth() ||
2921 FalseVal->getBitWidth() != ConstOffset->getBitWidth())
2922 return nullptr;
2923
2924 APInt NewTrueVal = *ConstOffset + *TrueVal;
2925 APInt NewFalseVal = *ConstOffset + *FalseVal;
2926 Constant *NewTrue = ConstantInt::get(Select->getType(), NewTrueVal);
2927 Constant *NewFalse = ConstantInt::get(Select->getType(), NewFalseVal);
2928 Value *NewSelect = Builder.CreateSelect(
2929 Cond, NewTrue, NewFalse, /*Name=*/"",
2930 /*MDFrom=*/(ProfcheckDisableMetadataFixes ? nullptr : Select));
2931 GEPNoWrapFlags Flags =
2933 return replaceInstUsesWith(GEP,
2934 Builder.CreateGEP(GEP.getResultElementType(),
2935 Src->getPointerOperand(),
2936 NewSelect, "", Flags));
2937 }
2938 }
2939
2940 // Find out whether the last index in the source GEP is a sequential idx.
2941 bool EndsWithSequential = false;
2942 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2943 I != E; ++I)
2944 EndsWithSequential = I.isSequential();
2945 if (!EndsWithSequential)
2946 return nullptr;
2947
2948 // Replace: gep (gep %P, long B), long A, ...
2949 // With: T = long A+B; gep %P, T, ...
2950 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2951 Value *GO1 = GEP.getOperand(1);
2952
2953 // If they aren't the same type, then the input hasn't been processed
2954 // by the loop above yet (which canonicalizes sequential index types to
2955 // intptr_t). Just avoid transforming this until the input has been
2956 // normalized.
2957 if (SO1->getType() != GO1->getType())
2958 return nullptr;
2959
2960 Value *Sum =
2961 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2962 // Only do the combine when we are sure the cost after the
2963 // merge is never more than that before the merge.
2964 if (Sum == nullptr)
2965 return nullptr;
2966
2968 Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2969 Indices.push_back(Sum);
2970 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2971
2972 // Don't create GEPs with more than one non-zero index.
2973 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2974 auto *C = dyn_cast<Constant>(Idx);
2975 return !C || !C->isNullValue();
2976 });
2977 if (NumNonZeroIndices > 1)
2978 return nullptr;
2979
2980 return replaceInstUsesWith(
2981 GEP, Builder.CreateGEP(
2982 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2984}
2985
2988 bool &DoesConsume, unsigned Depth) {
2989 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2990 // ~(~(X)) -> X.
2991 Value *A, *B;
2992 if (match(V, m_Not(m_Value(A)))) {
2993 DoesConsume = true;
2994 return A;
2995 }
2996
2997 Constant *C;
2998 // Constants can be considered to be not'ed values.
2999 if (match(V, m_ImmConstant(C)))
3000 return ConstantExpr::getNot(C);
3001
3003 return nullptr;
3004
3005 // The rest of the cases require that we invert all uses so don't bother
3006 // doing the analysis if we know we can't use the result.
3007 if (!WillInvertAllUses)
3008 return nullptr;
3009
3010 // Compares can be inverted if all of their uses are being modified to use
3011 // the ~V.
3012 if (auto *I = dyn_cast<CmpInst>(V)) {
3013 if (Builder != nullptr)
3014 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
3015 I->getOperand(1));
3016 return NonNull;
3017 }
3018
3019 // If `V` is of the form `A + B` then `-1 - V` can be folded into
3020 // `(-1 - B) - A` if we are willing to invert all of the uses.
3021 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
3022 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3023 DoesConsume, Depth))
3024 return Builder ? Builder->CreateSub(BV, A) : NonNull;
3025 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3026 DoesConsume, Depth))
3027 return Builder ? Builder->CreateSub(AV, B) : NonNull;
3028 return nullptr;
3029 }
3030
3031 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
3032 // into `A ^ B` if we are willing to invert all of the uses.
3033 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
3034 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3035 DoesConsume, Depth))
3036 return Builder ? Builder->CreateXor(A, BV) : NonNull;
3037 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3038 DoesConsume, Depth))
3039 return Builder ? Builder->CreateXor(AV, B) : NonNull;
3040 return nullptr;
3041 }
3042
3043 // If `V` is of the form `B - A` then `-1 - V` can be folded into
3044 // `A + (-1 - B)` if we are willing to invert all of the uses.
3045 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
3046 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3047 DoesConsume, Depth))
3048 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
3049 return nullptr;
3050 }
3051
3052 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
3053 // into `A s>> B` if we are willing to invert all of the uses.
3054 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
3055 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3056 DoesConsume, Depth))
3057 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
3058 return nullptr;
3059 }
3060
3061 Value *Cond;
3062 // LogicOps are special in that we canonicalize them at the cost of an
3063 // instruction.
3064 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
3066 // Selects/min/max with invertible operands are freely invertible
3067 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
3068 bool LocalDoesConsume = DoesConsume;
3069 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
3070 LocalDoesConsume, Depth))
3071 return nullptr;
3072 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3073 LocalDoesConsume, Depth)) {
3074 DoesConsume = LocalDoesConsume;
3075 if (Builder != nullptr) {
3076 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3077 DoesConsume, Depth);
3078 assert(NotB != nullptr &&
3079 "Unable to build inverted value for known freely invertable op");
3080 if (auto *II = dyn_cast<IntrinsicInst>(V))
3081 return Builder->CreateBinaryIntrinsic(
3082 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
3083 return Builder->CreateSelect(
3084 Cond, NotA, NotB, "",
3086 }
3087 return NonNull;
3088 }
3089 }
3090
3091 if (PHINode *PN = dyn_cast<PHINode>(V)) {
3092 bool LocalDoesConsume = DoesConsume;
3094 for (Use &U : PN->operands()) {
3095 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
3096 Value *NewIncomingVal = getFreelyInvertedImpl(
3097 U.get(), /*WillInvertAllUses=*/false,
3098 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
3099 if (NewIncomingVal == nullptr)
3100 return nullptr;
3101 // Make sure that we can safely erase the original PHI node.
3102 if (NewIncomingVal == V)
3103 return nullptr;
3104 if (Builder != nullptr)
3105 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
3106 }
3107
3108 DoesConsume = LocalDoesConsume;
3109 if (Builder != nullptr) {
3111 Builder->SetInsertPoint(PN);
3112 PHINode *NewPN =
3113 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
3114 for (auto [Val, Pred] : IncomingValues)
3115 NewPN->addIncoming(Val, Pred);
3116 return NewPN;
3117 }
3118 return NonNull;
3119 }
3120
3121 if (match(V, m_SExtLike(m_Value(A)))) {
3122 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3123 DoesConsume, Depth))
3124 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
3125 return nullptr;
3126 }
3127
3128 if (match(V, m_Trunc(m_Value(A)))) {
3129 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3130 DoesConsume, Depth))
3131 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
3132 return nullptr;
3133 }
3134
3135 // De Morgan's Laws:
3136 // (~(A | B)) -> (~A & ~B)
3137 // (~(A & B)) -> (~A | ~B)
3138 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
3139 bool IsLogical, Value *A,
3140 Value *B) -> Value * {
3141 bool LocalDoesConsume = DoesConsume;
3142 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
3143 LocalDoesConsume, Depth))
3144 return nullptr;
3145 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3146 LocalDoesConsume, Depth)) {
3147 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3148 LocalDoesConsume, Depth);
3149 DoesConsume = LocalDoesConsume;
3150 if (IsLogical)
3151 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
3152 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
3153 }
3154
3155 return nullptr;
3156 };
3157
3158 if (match(V, m_Or(m_Value(A), m_Value(B))))
3159 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
3160 B);
3161
3162 if (match(V, m_And(m_Value(A), m_Value(B))))
3163 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
3164 B);
3165
3166 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
3167 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
3168 B);
3169
3170 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
3171 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
3172 B);
3173
3174 return nullptr;
3175}
3176
3177/// Return true if we should canonicalize the gep to an i8 ptradd.
3179 Value *PtrOp = GEP.getOperand(0);
3180 Type *GEPEltType = GEP.getSourceElementType();
3181 if (GEPEltType->isIntegerTy(8))
3182 return false;
3183
3184 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3185 // intrinsic. This has better support in BasicAA.
3186 if (GEPEltType->isScalableTy())
3187 return true;
3188
3189 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3190 // together.
3191 if (GEP.getNumIndices() == 1 &&
3192 match(GEP.getOperand(1),
3194 m_Shl(m_Value(), m_ConstantInt())))))
3195 return true;
3196
3197 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3198 // possibly be merged together.
3199 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3200 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3201 any_of(GEP.indices(), [](Value *V) {
3202 const APInt *C;
3203 return match(V, m_APInt(C)) && !C->isZero();
3204 });
3205}
3206
3208 IRBuilderBase &Builder) {
3209 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3210 if (!Op1)
3211 return nullptr;
3212
3213 // Don't fold a GEP into itself through a PHI node. This can only happen
3214 // through the back-edge of a loop. Folding a GEP into itself means that
3215 // the value of the previous iteration needs to be stored in the meantime,
3216 // thus requiring an additional register variable to be live, but not
3217 // actually achieving anything (the GEP still needs to be executed once per
3218 // loop iteration).
3219 if (Op1 == &GEP)
3220 return nullptr;
3221 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3222
3223 int DI = -1;
3224
3225 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3226 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3227 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3228 Op1->getSourceElementType() != Op2->getSourceElementType())
3229 return nullptr;
3230
3231 // As for Op1 above, don't try to fold a GEP into itself.
3232 if (Op2 == &GEP)
3233 return nullptr;
3234
3235 // Keep track of the type as we walk the GEP.
3236 Type *CurTy = nullptr;
3237
3238 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3239 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3240 return nullptr;
3241
3242 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3243 if (DI == -1) {
3244 // We have not seen any differences yet in the GEPs feeding the
3245 // PHI yet, so we record this one if it is allowed to be a
3246 // variable.
3247
3248 // The first two arguments can vary for any GEP, the rest have to be
3249 // static for struct slots
3250 if (J > 1) {
3251 assert(CurTy && "No current type?");
3252 if (CurTy->isStructTy())
3253 return nullptr;
3254 }
3255
3256 DI = J;
3257 } else {
3258 // The GEP is different by more than one input. While this could be
3259 // extended to support GEPs that vary by more than one variable it
3260 // doesn't make sense since it greatly increases the complexity and
3261 // would result in an R+R+R addressing mode which no backend
3262 // directly supports and would need to be broken into several
3263 // simpler instructions anyway.
3264 return nullptr;
3265 }
3266 }
3267
3268 // Sink down a layer of the type for the next iteration.
3269 if (J > 0) {
3270 if (J == 1) {
3271 CurTy = Op1->getSourceElementType();
3272 } else {
3273 CurTy =
3274 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3275 }
3276 }
3277 }
3278
3279 NW &= Op2->getNoWrapFlags();
3280 }
3281
3282 // If not all GEPs are identical we'll have to create a new PHI node.
3283 // Check that the old PHI node has only one use so that it will get
3284 // removed.
3285 if (DI != -1 && !PN->hasOneUse())
3286 return nullptr;
3287
3288 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3289 NewGEP->setNoWrapFlags(NW);
3290
3291 if (DI == -1) {
3292 // All the GEPs feeding the PHI are identical. Clone one down into our
3293 // BB so that it can be merged with the current GEP.
3294 } else {
3295 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3296 // into the current block so it can be merged, and create a new PHI to
3297 // set that index.
3298 PHINode *NewPN;
3299 {
3300 IRBuilderBase::InsertPointGuard Guard(Builder);
3301 Builder.SetInsertPoint(PN);
3302 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3303 PN->getNumOperands());
3304 }
3305
3306 for (auto &I : PN->operands())
3307 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3308 PN->getIncomingBlock(I));
3309
3310 NewGEP->setOperand(DI, NewPN);
3311 }
3312
3313 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3314 return NewGEP;
3315}
3316
3318 Value *PtrOp = GEP.getOperand(0);
3319 SmallVector<Value *, 8> Indices(GEP.indices());
3320 Type *GEPType = GEP.getType();
3321 Type *GEPEltType = GEP.getSourceElementType();
3322 if (Value *V =
3323 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3324 SQ.getWithInstruction(&GEP)))
3325 return replaceInstUsesWith(GEP, V);
3326
3327 // For vector geps, use the generic demanded vector support.
3328 // Skip if GEP return type is scalable. The number of elements is unknown at
3329 // compile-time.
3330 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3331 auto VWidth = GEPFVTy->getNumElements();
3332 APInt PoisonElts(VWidth, 0);
3333 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3334 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3335 PoisonElts)) {
3336 if (V != &GEP)
3337 return replaceInstUsesWith(GEP, V);
3338 return &GEP;
3339 }
3340 }
3341
3342 // Eliminate unneeded casts for indices, and replace indices which displace
3343 // by multiples of a zero size type with zero.
3344 bool MadeChange = false;
3345
3346 // Index width may not be the same width as pointer width.
3347 // Data layout chooses the right type based on supported integer types.
3348 Type *NewScalarIndexTy =
3349 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3350
3352 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3353 ++I, ++GTI) {
3354 // Skip indices into struct types.
3355 if (GTI.isStruct())
3356 continue;
3357
3358 Type *IndexTy = (*I)->getType();
3359 Type *NewIndexType =
3360 IndexTy->isVectorTy()
3361 ? VectorType::get(NewScalarIndexTy,
3362 cast<VectorType>(IndexTy)->getElementCount())
3363 : NewScalarIndexTy;
3364
3365 // If the element type has zero size then any index over it is equivalent
3366 // to an index of zero, so replace it with zero if it is not zero already.
3367 Type *EltTy = GTI.getIndexedType();
3368 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3369 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3370 *I = Constant::getNullValue(NewIndexType);
3371 MadeChange = true;
3372 }
3373
3374 if (IndexTy != NewIndexType) {
3375 // If we are using a wider index than needed for this platform, shrink
3376 // it to what we need. If narrower, sign-extend it to what we need.
3377 // This explicit cast can make subsequent optimizations more obvious.
3378 if (IndexTy->getScalarSizeInBits() <
3379 NewIndexType->getScalarSizeInBits()) {
3380 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3381 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3382 else
3383 *I = Builder.CreateSExt(*I, NewIndexType);
3384 } else {
3385 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3386 GEP.hasNoUnsignedSignedWrap());
3387 }
3388 MadeChange = true;
3389 }
3390 }
3391 if (MadeChange)
3392 return &GEP;
3393
3394 // Canonicalize constant GEPs to i8 type.
3395 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3396 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3397 if (GEP.accumulateConstantOffset(DL, Offset))
3398 return replaceInstUsesWith(
3399 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3400 GEP.getNoWrapFlags()));
3401 }
3402
3404 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3405 Value *NewGEP =
3406 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3407 return replaceInstUsesWith(GEP, NewGEP);
3408 }
3409
3410 // Strip trailing zero indices.
3411 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3412 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3413 return replaceInstUsesWith(
3414 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3415 drop_end(Indices), "", GEP.getNoWrapFlags()));
3416 }
3417
3418 // Strip leading zero indices.
3419 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3420 if (FirstIdx && FirstIdx->isNullValue() &&
3421 !FirstIdx->getType()->isVectorTy()) {
3423 ++GTI;
3424 if (!GTI.isStruct() && GTI.getSequentialElementStride(DL) ==
3425 DL.getTypeAllocSize(GTI.getIndexedType()))
3426 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3427 GEP.getPointerOperand(),
3428 drop_begin(Indices), "",
3429 GEP.getNoWrapFlags()));
3430 }
3431
3432 // Scalarize vector operands; prefer splat-of-gep as canonical form.
3433 // Note that this loses information about undef lanes; we run it after
3434 // demanded bits to partially mitigate that loss.
3435 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3436 return Op->getType()->isVectorTy() && getSplatValue(Op);
3437 })) {
3438 SmallVector<Value *> NewOps;
3439 for (auto &Op : GEP.operands()) {
3440 if (Op->getType()->isVectorTy())
3441 if (Value *Scalar = getSplatValue(Op)) {
3442 NewOps.push_back(Scalar);
3443 continue;
3444 }
3445 NewOps.push_back(Op);
3446 }
3447
3448 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3449 ArrayRef(NewOps).drop_front(), GEP.getName(),
3450 GEP.getNoWrapFlags());
3451 if (!Res->getType()->isVectorTy()) {
3452 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3453 Res = Builder.CreateVectorSplat(EC, Res);
3454 }
3455 return replaceInstUsesWith(GEP, Res);
3456 }
3457
3458 bool SeenNonZeroIndex = false;
3459 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3460 // Ignore one leading zero index.
3461 auto *C = dyn_cast<Constant>(Idx);
3462 if (C && C->isNullValue() && IdxNum == 0)
3463 continue;
3464
3465 if (!SeenNonZeroIndex) {
3466 SeenNonZeroIndex = true;
3467 continue;
3468 }
3469
3470 // GEP has multiple non-zero indices: Split it.
3471 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3472 Value *FrontGEP =
3473 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3474 GEP.getName() + ".split", GEP.getNoWrapFlags());
3475
3476 SmallVector<Value *> BackIndices;
3477 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3478 append_range(BackIndices, drop_begin(Indices, IdxNum));
3480 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3481 BackIndices, GEP.getNoWrapFlags());
3482 }
3483
3484 // Canonicalize gep %T to gep [sizeof(%T) x i8]:
3485 auto IsCanonicalType = [](Type *Ty) {
3486 if (auto *AT = dyn_cast<ArrayType>(Ty))
3487 Ty = AT->getElementType();
3488 return Ty->isIntegerTy(8);
3489 };
3490 if (Indices.size() == 1 && !IsCanonicalType(GEPEltType)) {
3491 TypeSize Scale = DL.getTypeAllocSize(GEPEltType);
3492 assert(!Scale.isScalable() && "Should have been handled earlier");
3493 Type *NewElemTy = Builder.getInt8Ty();
3494 if (Scale.getFixedValue() != 1)
3495 NewElemTy = ArrayType::get(NewElemTy, Scale.getFixedValue());
3496 GEP.setSourceElementType(NewElemTy);
3497 GEP.setResultElementType(NewElemTy);
3498 // Don't bother revisiting the GEP after this change.
3499 MadeIRChange = true;
3500 }
3501
3502 // Check to see if the inputs to the PHI node are getelementptr instructions.
3503 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3504 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3505 return replaceOperand(GEP, 0, NewPtrOp);
3506 }
3507
3508 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3509 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3510 return I;
3511
3512 if (GEP.getNumIndices() == 1) {
3513 unsigned AS = GEP.getPointerAddressSpace();
3514 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3515 DL.getIndexSizeInBits(AS)) {
3516 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3517
3518 if (TyAllocSize == 1) {
3519 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3520 // but only if the result pointer is only used as if it were an integer.
3521 // (The case where the underlying object is the same is handled by
3522 // InstSimplify.)
3523 Value *X = GEP.getPointerOperand();
3524 Value *Y;
3525 if (match(GEP.getOperand(1), m_Sub(m_PtrToIntOrAddr(m_Value(Y)),
3527 GEPType == Y->getType()) {
3528 bool HasNonAddressBits =
3529 DL.getAddressSizeInBits(AS) != DL.getPointerSizeInBits(AS);
3530 bool Changed = GEP.replaceUsesWithIf(Y, [&](Use &U) {
3531 return isa<PtrToAddrInst, ICmpInst>(U.getUser()) ||
3532 (!HasNonAddressBits && isa<PtrToIntInst>(U.getUser()));
3533 });
3534 return Changed ? &GEP : nullptr;
3535 }
3536 } else if (auto *ExactIns =
3537 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3538 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3539 Value *V;
3540 if (ExactIns->isExact()) {
3541 if ((has_single_bit(TyAllocSize) &&
3542 match(GEP.getOperand(1),
3543 m_Shr(m_Value(V),
3544 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3545 match(GEP.getOperand(1),
3546 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3547 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3548 GEP.getPointerOperand(), V,
3549 GEP.getNoWrapFlags());
3550 }
3551 }
3552 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3553 // Try to canonicalize non-i8 element type to i8 if the index is an
3554 // exact instruction. If the index is an exact instruction (div/shr)
3555 // with a constant RHS, we can fold the non-i8 element scale into the
3556 // div/shr (similar to the mul case, just inverted).
3557 const APInt *C;
3558 std::optional<APInt> NewC;
3559 if (has_single_bit(TyAllocSize) &&
3560 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3561 C->uge(countr_zero(TyAllocSize)))
3562 NewC = *C - countr_zero(TyAllocSize);
3563 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3564 APInt Quot;
3565 uint64_t Rem;
3566 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3567 if (Rem == 0)
3568 NewC = Quot;
3569 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3570 APInt Quot;
3571 int64_t Rem;
3572 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3573 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3574 if (!Quot.isAllOnes() && Rem == 0)
3575 NewC = Quot;
3576 }
3577
3578 if (NewC.has_value()) {
3579 Value *NewOp = Builder.CreateBinOp(
3580 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3581 ConstantInt::get(V->getType(), *NewC));
3582 cast<BinaryOperator>(NewOp)->setIsExact();
3583 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3584 GEP.getPointerOperand(), NewOp,
3585 GEP.getNoWrapFlags());
3586 }
3587 }
3588 }
3589 }
3590 }
3591 // We do not handle pointer-vector geps here.
3592 if (GEPType->isVectorTy())
3593 return nullptr;
3594
3595 if (!GEP.isInBounds()) {
3596 unsigned IdxWidth =
3597 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3598 APInt BasePtrOffset(IdxWidth, 0);
3599 Value *UnderlyingPtrOp =
3600 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3601 bool CanBeNull, CanBeFreed;
3602 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3603 DL, CanBeNull, CanBeFreed);
3604 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3605 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3606 BasePtrOffset.isNonNegative()) {
3607 APInt AllocSize(IdxWidth, DerefBytes);
3608 if (BasePtrOffset.ule(AllocSize)) {
3610 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3611 }
3612 }
3613 }
3614 }
3615
3616 // nusw + nneg -> nuw
3617 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3618 all_of(GEP.indices(), [&](Value *Idx) {
3619 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3620 })) {
3621 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3622 return &GEP;
3623 }
3624
3625 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3626 // to do this after having tried to derive "nuw" above.
3627 if (GEP.getNumIndices() == 1) {
3628 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3629 // geps if transforming into (gep (gep p, x), y).
3630 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3631 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3632 // that x + y does not have unsigned wrap.
3633 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3634 return GEP.getNoWrapFlags();
3635 return GEPNoWrapFlags::none();
3636 };
3637
3638 // Try to replace ADD + GEP with GEP + GEP.
3639 Value *Idx1, *Idx2;
3640 if (match(GEP.getOperand(1),
3641 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3642 // %idx = add i64 %idx1, %idx2
3643 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3644 // as:
3645 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3646 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3647 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3648 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3649 auto *NewPtr =
3650 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3651 Idx1, "", NWFlags);
3652 return replaceInstUsesWith(GEP,
3653 Builder.CreateGEP(GEP.getSourceElementType(),
3654 NewPtr, Idx2, "", NWFlags));
3655 }
3656 ConstantInt *C;
3657 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3658 m_Value(Idx1), m_ConstantInt(C))))))) {
3659 // %add = add nsw i32 %idx1, idx2
3660 // %sidx = sext i32 %add to i64
3661 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3662 // as:
3663 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3664 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3665 bool NUW = match(GEP.getOperand(1),
3667 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3668 auto *NewPtr = Builder.CreateGEP(
3669 GEP.getSourceElementType(), GEP.getPointerOperand(),
3670 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3671 return replaceInstUsesWith(
3672 GEP,
3673 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3674 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3675 "", NWFlags));
3676 }
3677 }
3678
3680 return R;
3681
3682 // srem -> (and/urem) for inbounds+nuw GEP
3683 if (Indices.size() == 1 && GEP.isInBounds() && GEP.hasNoUnsignedWrap()) {
3684 Value *X, *Y;
3685
3686 // Match: idx = srem X, Y -- where Y is a power-of-two value.
3687 if (match(Indices[0], m_OneUse(m_SRem(m_Value(X), m_Value(Y)))) &&
3688 isKnownToBeAPowerOfTwo(Y, /*OrZero=*/true, &GEP)) {
3689 // If GEP is inbounds+nuw, the offset cannot be negative
3690 // -> srem by power-of-two can be treated as urem,
3691 // and urem by power-of-two folds to 'and' later.
3692 // OrZero=true is fine here because division by zero is UB.
3693 Instruction *OldIdxI = cast<Instruction>(Indices[0]);
3694 Value *NewIdx = Builder.CreateURem(X, Y, OldIdxI->getName());
3695
3696 return GetElementPtrInst::Create(GEPEltType, PtrOp, {NewIdx},
3697 GEP.getNoWrapFlags());
3698 }
3699 }
3700
3701 return nullptr;
3702}
3703
3705 Instruction *AI) {
3707 return true;
3708 if (auto *LI = dyn_cast<LoadInst>(V))
3709 return isa<GlobalVariable>(LI->getPointerOperand());
3710 // Two distinct allocations will never be equal.
3711 return isAllocLikeFn(V, &TLI) && V != AI;
3712}
3713
3714/// Given a call CB which uses an address UsedV, return true if we can prove the
3715/// call's only possible effect is storing to V.
3716static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3717 const TargetLibraryInfo &TLI) {
3718 if (!CB.use_empty())
3719 // TODO: add recursion if returned attribute is present
3720 return false;
3721
3722 if (CB.isTerminator())
3723 // TODO: remove implementation restriction
3724 return false;
3725
3726 if (!CB.willReturn() || !CB.doesNotThrow())
3727 return false;
3728
3729 // If the only possible side effect of the call is writing to the alloca,
3730 // and the result isn't used, we can safely remove any reads implied by the
3731 // call including those which might read the alloca itself.
3732 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3733 return Dest && Dest->Ptr == UsedV;
3734}
3735
// isAllocSiteRemovable — NOTE(review): this is a doxygen-scrape listing; the
// gaps in the embedded line numbers below (3737, 3739, 3742, 3747, 3796,
// 3804, 3847, 3855, 3860, 3871, 3880) mark hyperlinked source lines that the
// extraction dropped, including the line carrying the function name and
// first parameters — verify against upstream LLVM before relying on this.
//
// Walks all transitive users of an allocation and, if every user can be
// accounted for, returns the combined access kind (Mod and/or Ref) those
// users perform; returns std::nullopt the moment an unsupported user is
// found. Each acceptable user is appended to `Users` so the caller
// (visitAllocSite) can rewrite or erase it.
3736 static std::optional<ModRefInfo>
3738 const TargetLibraryInfo &TLI, bool KnowInit) {
3740 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3741 Worklist.push_back(AI);
3743
// Depth-first walk: PI is the current pointer-valued instruction whose users
// are being classified.
3744 do {
3745 Instruction *PI = Worklist.pop_back_val();
3746 for (User *U : PI->users()) {
3748 switch (I->getOpcode()) {
3749 default:
3750 // Give up the moment we see something we can't handle.
3751 return std::nullopt;
3752
// Address-preserving instructions: record them and keep chasing their users.
3753 case Instruction::AddrSpaceCast:
3754 case Instruction::BitCast:
3755 case Instruction::GetElementPtr:
3756 Users.emplace_back(I);
3757 Worklist.push_back(I);
3758 continue;
3759
3760 case Instruction::ICmp: {
3761 ICmpInst *ICI = cast<ICmpInst>(I);
3762 // We can fold eq/ne comparisons with null to false/true, respectively.
3763 // We also fold comparisons in some conditions provided the alloc has
3764 // not escaped (see isNeverEqualToUnescapedAlloc).
3765 if (!ICI->isEquality())
3766 return std::nullopt;
3767 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3768 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3769 return std::nullopt;
3770
3771 // Do not fold compares to aligned_alloc calls, as they may have to
3772 // return null in case the required alignment cannot be satisfied,
3773 // unless we can prove that both alignment and size are valid.
3774 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3775 // Check if alignment and size of a call to aligned_alloc is valid,
3776 // that is alignment is a power-of-2 and the size is a multiple of the
3777 // alignment.
3778 const APInt *Alignment;
3779 const APInt *Size;
3780 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3781 match(CB->getArgOperand(1), m_APInt(Size)) &&
3782 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3783 };
3784 auto *CB = dyn_cast<CallBase>(AI);
3785 LibFunc TheLibFunc;
3786 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3787 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3788 !AlignmentAndSizeKnownValid(CB))
3789 return std::nullopt;
3790 Users.emplace_back(I);
3791 continue;
3792 }
3793
3794 case Instruction::Call:
3795 // Ignore no-op and store intrinsics.
3797 switch (II->getIntrinsicID()) {
3798 default:
3799 return std::nullopt;
3800
3801 case Intrinsic::memmove:
3802 case Intrinsic::memcpy:
3803 case Intrinsic::memset: {
3805 if (MI->isVolatile())
3806 return std::nullopt;
3807 // Note: this could also be ModRef, but we can still interpret that
3808 // as just Mod in that case.
3809 ModRefInfo NewAccess =
3810 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3811 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3812 return std::nullopt;
3813 Access |= NewAccess;
3814 [[fallthrough]];
3815 }
3816 case Intrinsic::assume:
3817 case Intrinsic::invariant_start:
3818 case Intrinsic::invariant_end:
3819 case Intrinsic::lifetime_start:
3820 case Intrinsic::lifetime_end:
3821 case Intrinsic::objectsize:
3822 Users.emplace_back(I);
3823 continue;
// Invariant-group launders/strips forward the pointer, so walk their users.
3824 case Intrinsic::launder_invariant_group:
3825 case Intrinsic::strip_invariant_group:
3826 Users.emplace_back(I);
3827 Worklist.push_back(I);
3828 continue;
3829 }
3830 }
3831
// A free of the same allocation family is acceptable.
3832 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3833 getAllocationFamily(I, &TLI) == Family) {
3834 Users.emplace_back(I);
3835 continue;
3836 }
3837
// Same for a realloc in the same family; its result must be walked too.
3838 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3839 getAllocationFamily(I, &TLI) == Family) {
3840 Users.emplace_back(I);
3841 Worklist.push_back(I);
3842 continue;
3843 }
3844
// A call whose only effect is a removable write to this pointer is fine,
// provided no prior user read the memory.
3845 if (!isRefSet(Access) &&
3846 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3848 Users.emplace_back(I);
3849 continue;
3850 }
3851
3852 return std::nullopt;
3853
3854 case Instruction::Store: {
3856 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3857 return std::nullopt;
3858 if (isRefSet(Access))
3859 return std::nullopt;
3861 Users.emplace_back(I);
3862 continue;
3863 }
3864
3865 case Instruction::Load: {
3866 LoadInst *LI = cast<LoadInst>(I);
3867 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3868 return std::nullopt;
3869 if (isModSet(Access))
3870 return std::nullopt;
3872 Users.emplace_back(I);
3873 continue;
3874 }
3875 }
3876 llvm_unreachable("missing a return?");
3877 }
3878 } while (!Worklist.empty());
3879
3881 return Access;
3882}
3883
// InstCombinerImpl::visitAllocSite — NOTE(review): the function header line
// (3884-3885) and several other hyperlinked lines were dropped by the
// doxygen scrape; the gaps in the embedded numbering below mark them —
// verify against upstream LLVM before relying on this listing.
//
// If isAllocSiteRemovable proves every user of this allocation is benign
// (null compares, same-family frees/reallocs, removable writes, certain
// intrinsics), lower/replace those users and erase the allocation itself.
3886
3887 // If we have a malloc call which is only used in any amount of comparisons to
3888 // null and free calls, delete the calls and replace the comparisons with true
3889 // or false as appropriate.
3890
3891 // This is based on the principle that we can substitute our own allocation
3892 // function (which will never return null) rather than knowledge of the
3893 // specific function being called. In some sense this can change the permitted
3894 // outputs of a program (when we convert a malloc to an alloca, the fact that
3895 // the allocation is now on the stack is potentially visible, for example),
3896 // but we believe in a permissible manner.
3898
3899 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3900 // before each store.
3902 std::unique_ptr<DIBuilder> DIB;
3903 if (isa<AllocaInst>(MI)) {
3904 findDbgUsers(&MI, DVRs);
3905 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3906 }
3907
3908 // Determine what getInitialValueOfAllocation would return without actually
3909 // allocating the result.
3910 bool KnowInitUndef = false;
3911 bool KnowInitZero = false;
3912 Constant *Init =
3914 if (Init) {
3915 if (isa<UndefValue>(Init))
3916 KnowInitUndef = true;
3917 else if (Init->isNullValue())
3918 KnowInitZero = true;
3919 }
3920 // The various sanitizers don't actually return undef memory, but rather
3921 // memory initialized with special forms of runtime poison
3922 auto &F = *MI.getFunction();
3923 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3924 F.hasFnAttribute(Attribute::SanitizeAddress))
3925 KnowInitUndef = false;
3926
3927 auto Removable =
3928 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3929 if (Removable) {
// First pass: lower @llvm.objectsize and rewrite mem-transfer reads, since
// they may still reference casts/GEPs of the allocation handled later.
3930 for (WeakTrackingVH &User : Users) {
3931 // Lowering all @llvm.objectsize and MTI calls first because they may use
3932 // a bitcast/GEP of the alloca we are removing.
3933 if (!User)
3934 continue;
3935
3937
3939 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3940 SmallVector<Instruction *> InsertedInstructions;
3941 Value *Result = lowerObjectSizeCall(
3942 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3943 for (Instruction *Inserted : InsertedInstructions)
3944 Worklist.add(Inserted);
3945 replaceInstUsesWith(*I, Result);
3947 User = nullptr; // Skip examining in the next loop.
3948 continue;
3949 }
// A memcpy/memmove that READ the (known zero-initialized) allocation is
// replaced by an equivalent memset of zero at the destination.
3950 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3951 if (KnowInitZero && isRefSet(*Removable)) {
3953 Builder.SetInsertPoint(MTI);
3954 auto *M = Builder.CreateMemSet(
3955 MTI->getRawDest(),
3956 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3957 MTI->getLength(), MTI->getDestAlign());
3958 M->copyMetadata(*MTI);
3959 }
3960 }
3961 }
3962 }
// Second pass: replace/erase every remaining recorded user.
3963 for (WeakTrackingVH &User : Users) {
3964 if (!User)
3965 continue;
3966
3968
3969 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3971 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3972 C->isFalseWhenEqual()));
3973 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3974 for (auto *DVR : DVRs)
3975 if (DVR->isAddressOfVariable())
3977 } else {
3978 // Casts, GEP, or anything else: we're about to delete this instruction,
3979 // so it can not have any valid uses.
3981 if (isa<LoadInst>(I)) {
3982 assert(KnowInitZero || KnowInitUndef);
3983 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3984 : Constant::getNullValue(I->getType());
3985 } else
3986 Replace = PoisonValue::get(I->getType());
3988 }
3990 }
3991
3993 // Replace invoke with a NOP intrinsic to maintain the original CFG
3994 Module *M = II->getModule();
3995 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3996 auto *NewII = InvokeInst::Create(
3997 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3998 NewII->setDebugLoc(II->getDebugLoc());
3999 }
4000
4001 // Remove debug intrinsics which describe the value contained within the
4002 // alloca. In addition to removing dbg.{declare,addr} which simply point to
4003 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
4004 //
4005 // ```
4006 // define void @foo(i32 %0) {
4007 // %a = alloca i32 ; Deleted.
4008 // store i32 %0, i32* %a
4009 // dbg.value(i32 %0, "arg0") ; Not deleted.
4010 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
4011 // call void @trivially_inlinable_no_op(i32* %a)
4012 // ret void
4013 // }
4014 // ```
4015 //
4016 // This may not be required if we stop describing the contents of allocas
4017 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
4018 // the LowerDbgDeclare utility.
4019 //
4020 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
4021 // "arg0" dbg.value may be stale after the call. However, failing to remove
4022 // the DW_OP_deref dbg.value causes large gaps in location coverage.
4023 //
4024 // FIXME: the Assignment Tracking project has now likely made this
4025 // redundant (and it's sometimes harmful).
4026 for (auto *DVR : DVRs)
4027 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
4028 DVR->eraseFromParent();
4029
4030 return eraseInstFromFunction(MI);
4031 }
4032 return nullptr;
4033}
4034
4035/// Move the call to free before a NULL test.
4036///
4037/// Check if this free is accessed after its argument has been test
4038/// against NULL (property 0).
4039/// If yes, it is legal to move this call in its predecessor block.
4040///
4041/// The move is performed only if the block containing the call to free
4042/// will be removed, i.e.:
4043/// 1. it has only one predecessor P, and P has two successors
4044/// 2. it contains the call, noops, and an unconditional branch
4045/// 3. its successor is the same as its predecessor's successor
4046///
4047/// The profitability is out-of concern here and this function should
4048/// be called only if the caller knows this transformation would be
4049/// profitable (e.g., for code size).
4051 const DataLayout &DL) {
4052 Value *Op = FI.getArgOperand(0);
4053 BasicBlock *FreeInstrBB = FI.getParent();
4054 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
4055
4056 // Validate part of constraint #1: Only one predecessor
4057 // FIXME: We can extend the number of predecessor, but in that case, we
4058 // would duplicate the call to free in each predecessor and it may
4059 // not be profitable even for code size.
4060 if (!PredBB)
4061 return nullptr;
4062
4063 // Validate constraint #2: Does this block contains only the call to
4064 // free, noops, and an unconditional branch?
4065 BasicBlock *SuccBB;
4066 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
4067 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
4068 return nullptr;
4069
4070 // If there are only 2 instructions in the block, at this point,
4071 // this is the call to free and unconditional.
4072 // If there are more than 2 instructions, check that they are noops
4073 // i.e., they won't hurt the performance of the generated code.
4074 if (FreeInstrBB->size() != 2) {
4075 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
4076 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
4077 continue;
4078 auto *Cast = dyn_cast<CastInst>(&Inst);
4079 if (!Cast || !Cast->isNoopCast(DL))
4080 return nullptr;
4081 }
4082 }
4083 // Validate the rest of constraint #1 by matching on the pred branch.
4084 Instruction *TI = PredBB->getTerminator();
4085 BasicBlock *TrueBB, *FalseBB;
4086 CmpPredicate Pred;
4087 if (!match(TI, m_Br(m_ICmp(Pred,
4089 m_Specific(Op->stripPointerCasts())),
4090 m_Zero()),
4091 TrueBB, FalseBB)))
4092 return nullptr;
4093 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
4094 return nullptr;
4095
4096 // Validate constraint #3: Ensure the null case just falls through.
4097 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
4098 return nullptr;
4099 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
4100 "Broken CFG: missing edge from predecessor to successor");
4101
4102 // At this point, we know that everything in FreeInstrBB can be moved
4103 // before TI.
4104 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
4105 if (&Instr == FreeInstrBBTerminator)
4106 break;
4107 Instr.moveBeforePreserving(TI->getIterator());
4108 }
4109 assert(FreeInstrBB->size() == 1 &&
4110 "Only the branch instruction should remain");
4111
4112 // Now that we've moved the call to free before the NULL check, we have to
4113 // remove any attributes on its parameter that imply it's non-null, because
4114 // those attributes might have only been valid because of the NULL check, and
4115 // we can get miscompiles if we keep them. This is conservative if non-null is
4116 // also implied by something other than the NULL check, but it's guaranteed to
4117 // be correct, and the conservativeness won't matter in practice, since the
4118 // attributes are irrelevant for the call to free itself and the pointer
4119 // shouldn't be used after the call.
4120 AttributeList Attrs = FI.getAttributes();
4121 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
4122 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
4123 if (Dereferenceable.isValid()) {
4124 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
4125 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
4126 Attribute::Dereferenceable);
4127 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
4128 }
4129 FI.setAttributes(Attrs);
4130
4131 return &FI;
4132}
4133
4135 // free undef -> unreachable.
4136 if (isa<UndefValue>(Op)) {
4137 // Leave a marker since we can't modify the CFG here.
4139 return eraseInstFromFunction(FI);
4140 }
4141
4142 // If we have 'free null' delete the instruction. This can happen in stl code
4143 // when lots of inlining happens.
4145 return eraseInstFromFunction(FI);
4146
4147 // If we had free(realloc(...)) with no intervening uses, then eliminate the
4148 // realloc() entirely.
4150 if (CI && CI->hasOneUse())
4151 if (Value *ReallocatedOp = getReallocatedOperand(CI))
4152 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
4153
4154 // If we optimize for code size, try to move the call to free before the null
4155 // test so that simplify cfg can remove the empty block and dead code
4156 // elimination the branch. I.e., helps to turn something like:
4157 // if (foo) free(foo);
4158 // into
4159 // free(foo);
4160 //
4161 // Note that we can only do this for 'free' and not for any flavor of
4162 // 'operator delete'; there is no 'operator delete' symbol for which we are
4163 // permitted to invent a call, even if we're passing in a null pointer.
4164 if (MinimizeSize) {
4165 LibFunc Func;
4166 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
4168 return I;
4169 }
4170
4171 return nullptr;
4172}
4173
4175 Value *RetVal = RI.getReturnValue();
4176 if (!RetVal)
4177 return nullptr;
4178
4179 Function *F = RI.getFunction();
4180 Type *RetTy = RetVal->getType();
4181 if (RetTy->isPointerTy()) {
4182 bool HasDereferenceable =
4183 F->getAttributes().getRetDereferenceableBytes() > 0;
4184 if (F->hasRetAttribute(Attribute::NonNull) ||
4185 (HasDereferenceable &&
4187 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
4188 return replaceOperand(RI, 0, V);
4189 }
4190 }
4191
4192 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
4193 return nullptr;
4194
4195 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
4196 if (ReturnClass == fcNone)
4197 return nullptr;
4198
4199 KnownFPClass KnownClass;
4200 if (SimplifyDemandedFPClass(&RI, 0, ~ReturnClass, KnownClass))
4201 return &RI;
4202
4203 return nullptr;
4204}
4205
4206// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
4208 // Try to remove the previous instruction if it must lead to unreachable.
4209 // This includes instructions like stores and "llvm.assume" that may not get
4210 // removed by simple dead code elimination.
4211 bool Changed = false;
4212 while (Instruction *Prev = I.getPrevNode()) {
4213 // While we theoretically can erase EH, that would result in a block that
4214 // used to start with an EH no longer starting with EH, which is invalid.
4215 // To make it valid, we'd need to fixup predecessors to no longer refer to
4216 // this block, but that changes CFG, which is not allowed in InstCombine.
4217 if (Prev->isEHPad())
4218 break; // Can not drop any more instructions. We're done here.
4219
4221 break; // Can not drop any more instructions. We're done here.
4222 // Otherwise, this instruction can be freely erased,
4223 // even if it is not side-effect free.
4224
4225 // A value may still have uses before we process it here (for example, in
4226 // another unreachable block), so convert those to poison.
4227 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4228 eraseInstFromFunction(*Prev);
4229 Changed = true;
4230 }
4231 return Changed;
4232}
4233
4238
4240 assert(BI.isUnconditional() && "Only for unconditional branches.");
4241
4242 // If this store is the second-to-last instruction in the basic block
4243 // (excluding debug info) and if the block ends with
4244 // an unconditional branch, try to move the store to the successor block.
4245
4246 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4247 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4248 do {
4249 if (BBI != FirstInstr)
4250 --BBI;
4251 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4252
4253 return dyn_cast<StoreInst>(BBI);
4254 };
4255
4256 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4258 return &BI;
4259
4260 return nullptr;
4261}
4262
4265 if (!DeadEdges.insert({From, To}).second)
4266 return;
4267
4268 // Replace phi node operands in successor with poison.
4269 for (PHINode &PN : To->phis())
4270 for (Use &U : PN.incoming_values())
4271 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4272 replaceUse(U, PoisonValue::get(PN.getType()));
4273 addToWorklist(&PN);
4274 MadeIRChange = true;
4275 }
4276
4277 Worklist.push_back(To);
4278}
4279
4280// Under the assumption that I is unreachable, remove it and following
4281// instructions. Changes are reported directly to MadeIRChange.
4284 BasicBlock *BB = I->getParent();
4285 for (Instruction &Inst : make_early_inc_range(
4286 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4287 std::next(I->getReverseIterator())))) {
4288 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4289 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4290 MadeIRChange = true;
4291 }
4292 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4293 continue;
4294 // RemoveDIs: erase debug-info on this instruction manually.
4295 Inst.dropDbgRecords();
4297 MadeIRChange = true;
4298 }
4299
4302 MadeIRChange = true;
4303 for (Value *V : Changed)
4305 }
4306
4307 // Handle potentially dead successors.
4308 for (BasicBlock *Succ : successors(BB))
4309 addDeadEdge(BB, Succ, Worklist);
4310}
4311
4314 while (!Worklist.empty()) {
4315 BasicBlock *BB = Worklist.pop_back_val();
4316 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4317 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4318 }))
4319 continue;
4320
4322 }
4323}
4324
4326 BasicBlock *LiveSucc) {
4328 for (BasicBlock *Succ : successors(BB)) {
4329 // The live successor isn't dead.
4330 if (Succ == LiveSucc)
4331 continue;
4332
4333 addDeadEdge(BB, Succ, Worklist);
4334 }
4335
4337}
4338
// InstCombinerImpl::visitBranchInst — NOTE(review): this is a doxygen-scrape
// listing; numbering gaps below (4339, 4341, 4360, 4365, 4408) mark
// hyperlinked lines dropped by the extraction, including the function header
// and — presumably — the dispatch to visitUnconditionalBranchInst for the
// unconditional case; verify against upstream LLVM.
4340 if (BI.isUnconditional())
4342
4343 // Change br (not X), label True, label False to: br X, label False, True
4344 Value *Cond = BI.getCondition();
4345 Value *X;
4346 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4347 // Swap Destinations and condition...
4348 BI.swapSuccessors();
4349 if (BPI)
4350 BPI->swapSuccEdgesProbabilities(BI.getParent());
4351 return replaceOperand(BI, 0, X);
4352 }
4353
4354 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4355 // This is done by inverting the condition and swapping successors:
4356 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4357 Value *Y;
4358 if (isa<SelectInst>(Cond) &&
4359 match(Cond,
4361 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4362 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4363
4364 // Set weights for the new OR select instruction too.
// NOTE(review): an enclosing guard line (4365) is missing from this scrape —
// the extra closing brace at 4377 proves one exists; confirm its condition
// against upstream.
4366 if (auto *OrInst = dyn_cast<Instruction>(Or)) {
4367 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
4368 SmallVector<uint32_t> Weights;
4369 if (extractBranchWeights(*CondInst, Weights)) {
4370 assert(Weights.size() == 2 &&
4371 "Unexpected number of branch weights!");
// Successors are swapped below, so the weights must be swapped to match.
4372 std::swap(Weights[0], Weights[1]);
4373 setBranchWeights(*OrInst, Weights, /*IsExpected=*/false);
4374 }
4375 }
4376 }
4377 }
4378 BI.swapSuccessors();
4379 if (BPI)
4380 BPI->swapSuccEdgesProbabilities(BI.getParent());
4381 return replaceOperand(BI, 0, Or);
4382 }
4383
4384 // If the condition is irrelevant, remove the use so that other
4385 // transforms on the condition become more effective.
4386 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4387 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4388
4389 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4390 CmpPredicate Pred;
4391 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4392 !isCanonicalPredicate(Pred)) {
4393 // Swap destinations and condition.
4394 auto *Cmp = cast<CmpInst>(Cond);
4395 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4396 BI.swapSuccessors();
4397 if (BPI)
4398 BPI->swapSuccEdgesProbabilities(BI.getParent());
4399 Worklist.push(Cmp);
4400 return &BI;
4401 }
4402
// An undef condition means neither successor is provably live.
4403 if (isa<UndefValue>(Cond)) {
4404 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4405 return nullptr;
4406 }
4407 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4409 BI.getSuccessor(!CI->getZExtValue()));
4410 return nullptr;
4411 }
4412
4413 // Replace all dominated uses of the condition with true/false
4414 // Ignore constant expressions to avoid iterating over uses on other
4415 // functions.
4416 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4417 for (auto &U : make_early_inc_range(Cond->uses())) {
4418 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4419 if (DT.dominates(Edge0, U)) {
4420 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4421 addToWorklist(cast<Instruction>(U.getUser()));
4422 continue;
4423 }
4424 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4425 if (DT.dominates(Edge1, U)) {
4426 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4427 addToWorklist(cast<Instruction>(U.getUser()));
4428 }
4429 }
4430 }
4431
4432 DC.registerBranch(&BI);
4433 return nullptr;
4434}
4435
4436// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4437// we can prove that both (switch C) and (switch X) go to the default when cond
4438// is false/true.
4441 bool IsTrueArm) {
4442 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4443 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4444 if (!C)
4445 return nullptr;
4446
4447 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4448 if (CstBB != SI.getDefaultDest())
4449 return nullptr;
4450 Value *X = Select->getOperand(3 - CstOpIdx);
4451 CmpPredicate Pred;
4452 const APInt *RHSC;
4453 if (!match(Select->getCondition(),
4454 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4455 return nullptr;
4456 if (IsTrueArm)
4457 Pred = ICmpInst::getInversePredicate(Pred);
4458
4459 // See whether we can replace the select with X
4461 for (auto Case : SI.cases())
4462 if (!CR.contains(Case.getCaseValue()->getValue()))
4463 return nullptr;
4464
4465 return X;
4466}
4467
4469 Value *Cond = SI.getCondition();
4470 Value *Op0;
4471 const APInt *CondOpC;
4472 using InvertFn = std::function<APInt(const APInt &Case, const APInt &C)>;
4473
4474 auto MaybeInvertible = [&](Value *Cond) -> InvertFn {
4475 if (match(Cond, m_Add(m_Value(Op0), m_APInt(CondOpC))))
4476 // Change 'switch (X+C) case Case:' into 'switch (X) case Case-C'.
4477 return [](const APInt &Case, const APInt &C) { return Case - C; };
4478
4479 if (match(Cond, m_Sub(m_APInt(CondOpC), m_Value(Op0))))
4480 // Change 'switch (C-X) case Case:' into 'switch (X) case C-Case'.
4481 return [](const APInt &Case, const APInt &C) { return C - Case; };
4482
4483 if (match(Cond, m_Xor(m_Value(Op0), m_APInt(CondOpC))) &&
4484 !CondOpC->isMinSignedValue() && !CondOpC->isMaxSignedValue())
4485 // Change 'switch (X^C) case Case:' into 'switch (X) case Case^C'.
4486 // Prevent creation of large case values by excluding extremes.
4487 return [](const APInt &Case, const APInt &C) { return Case ^ C; };
4488
4489 return nullptr;
4490 };
4491
4492 // Attempt to invert and simplify the switch condition, as long as the
4493 // condition is not used further, as it may not be profitable otherwise.
4494 if (auto InvertFn = MaybeInvertible(Cond); InvertFn && Cond->hasOneUse()) {
4495 for (auto &Case : SI.cases()) {
4496 const APInt &New = InvertFn(Case.getCaseValue()->getValue(), *CondOpC);
4497 Case.setValue(ConstantInt::get(SI.getContext(), New));
4498 }
4499 return replaceOperand(SI, 0, Op0);
4500 }
4501
4502 uint64_t ShiftAmt;
4503 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4504 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4505 all_of(SI.cases(), [&](const auto &Case) {
4506 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4507 })) {
4508 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4510 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4511 Shl->hasOneUse()) {
4512 Value *NewCond = Op0;
4513 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4514 // If the shift may wrap, we need to mask off the shifted bits.
4515 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4516 NewCond = Builder.CreateAnd(
4517 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4518 }
4519 for (auto Case : SI.cases()) {
4520 const APInt &CaseVal = Case.getCaseValue()->getValue();
4521 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4522 : CaseVal.lshr(ShiftAmt);
4523 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4524 }
4525 return replaceOperand(SI, 0, NewCond);
4526 }
4527 }
4528
4529 // Fold switch(zext/sext(X)) into switch(X) if possible.
4530 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4531 bool IsZExt = isa<ZExtInst>(Cond);
4532 Type *SrcTy = Op0->getType();
4533 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4534
4535 if (all_of(SI.cases(), [&](const auto &Case) {
4536 const APInt &CaseVal = Case.getCaseValue()->getValue();
4537 return IsZExt ? CaseVal.isIntN(NewWidth)
4538 : CaseVal.isSignedIntN(NewWidth);
4539 })) {
4540 for (auto &Case : SI.cases()) {
4541 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4542 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4543 }
4544 return replaceOperand(SI, 0, Op0);
4545 }
4546 }
4547
4548 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4549 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4550 if (Value *V =
4551 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4552 return replaceOperand(SI, 0, V);
4553 if (Value *V =
4554 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4555 return replaceOperand(SI, 0, V);
4556 }
4557
4558 KnownBits Known = computeKnownBits(Cond, &SI);
4559 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4560 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4561
4562 // Compute the number of leading bits we can ignore.
4563 // TODO: A better way to determine this would use ComputeNumSignBits().
4564 for (const auto &C : SI.cases()) {
4565 LeadingKnownZeros =
4566 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4567 LeadingKnownOnes =
4568 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4569 }
4570
4571 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4572
4573 // Shrink the condition operand if the new type is smaller than the old type.
4574 // But do not shrink to a non-standard type, because backend can't generate
4575 // good code for that yet.
4576 // TODO: We can make it aggressive again after fixing PR39569.
4577 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4578 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4579 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4580 Builder.SetInsertPoint(&SI);
4581 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4582
4583 for (auto Case : SI.cases()) {
4584 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4585 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4586 }
4587 return replaceOperand(SI, 0, NewCond);
4588 }
4589
4590 if (isa<UndefValue>(Cond)) {
4591 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4592 return nullptr;
4593 }
4594 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4596 SI.findCaseValue(CI)->getCaseSuccessor());
4597 return nullptr;
4598 }
4599
4600 return nullptr;
4601}
4602
/// Simplify an extractvalue whose aggregate is a *.with.overflow intrinsic
/// call (WO). Index 0 extracts the arithmetic result, index 1 the overflow
/// bit; both can often be rewritten as a plain binary op or icmp.
/// Returns the replacement instruction, or nullptr if nothing folds.
4604InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4606  if (!WO)
4607    return nullptr;
4608
4609  Intrinsic::ID OvID = WO->getIntrinsicID();
4610  const APInt *C = nullptr;
      // Folds on the value result with a constant RHS. These do not require
      // the intrinsic to have a single user.
4611  if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4612    if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4613                                 OvID == Intrinsic::umul_with_overflow)) {
4614      // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4615      if (C->isAllOnes())
4616        return BinaryOperator::CreateNeg(WO->getLHS());
4617      // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4618      if (C->isPowerOf2()) {
4619        return BinaryOperator::CreateShl(
4620            WO->getLHS(),
4621            ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4622      }
4623    }
4624  }
4625
4626  // We're extracting from an overflow intrinsic. See if we're the only user.
4627  // That allows us to simplify multiple result intrinsics to simpler things
4628  // that just get one value.
4629  if (!WO->hasOneUse())
4630    return nullptr;
4631
4632  // Check if we're grabbing only the result of a 'with overflow' intrinsic
4633  // and replace it with a traditional binary instruction.
4634  if (*EV.idx_begin() == 0) {
4635    Instruction::BinaryOps BinOp = WO->getBinaryOp();
4636    Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4637    // Replace the old instruction's uses with poison.
4638    replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4640    return BinaryOperator::Create(BinOp, LHS, RHS);
4641  }
4642
4643  assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4644
4645  // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4646  if (OvID == Intrinsic::usub_with_overflow)
4647    return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4648
4649  // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4650  // +1 is not possible because we assume signed values.
4651  if (OvID == Intrinsic::smul_with_overflow &&
4652      WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4653    return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4654
4655  // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4656  if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4657    unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4658    // Only handle even bitwidths for performance reasons.
4659    if (BitWidth % 2 == 0)
4660      return new ICmpInst(
4661          ICmpInst::ICMP_UGT, WO->getLHS(),
4662          ConstantInt::get(WO->getLHS()->getType(),
4664  }
4665
4666  // If only the overflow result is used, and the right hand side is a
4667  // constant (or constant splat), we can remove the intrinsic by directly
4668  // checking for overflow.
4669  if (C) {
4670    // Compute the no-wrap range for LHS given RHS=C, then construct an
4671    // equivalent icmp, potentially using an offset.
4672    ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4673        WO->getBinaryOp(), *C, WO->getNoWrapKind());
4674
4675    CmpInst::Predicate Pred;
4676    APInt NewRHSC, Offset;
4677    NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4678    auto *OpTy = WO->getRHS()->getType();
4679    auto *NewLHS = WO->getLHS();
      // Bias LHS by the offset so a single compare against NewRHSC suffices.
4680    if (Offset != 0)
4681      NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
      // NWR describes the values that do NOT wrap, so invert the predicate
      // to detect overflow.
4682    return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4683                        ConstantInt::get(OpTy, NewRHSC));
4684  }
4685
4686  return nullptr;
4687}
4688
4691                                 InstCombiner::BuilderTy &Builder) {
4692  // Helper to fold frexp of select to select of frexp.
      // Only profitable when both the select and the frexp call die with this
      // transform; otherwise we would duplicate the frexp computation.
4694  if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4695    return nullptr;
4697  Value *TrueVal = SelectInst->getTrueValue();
4698  Value *FalseVal = SelectInst->getFalseValue();
4699
      // Identify which select arm is a float constant; the other (variable)
      // arm feeds the new frexp call.
4700  const APFloat *ConstVal = nullptr;
4701  Value *VarOp = nullptr;
4702  bool ConstIsTrue = false;
4703
4704  if (match(TrueVal, m_APFloat(ConstVal))) {
4705    VarOp = FalseVal;
4706    ConstIsTrue = true;
4707  } else if (match(FalseVal, m_APFloat(ConstVal))) {
4708    VarOp = TrueVal;
4709    ConstIsTrue = false;
4710  } else {
4711    return nullptr;
4712  }
4713
4714  Builder.SetInsertPoint(&EV);
4715
      // frexp of the variable arm, carrying over the original FP flags.
4716  CallInst *NewFrexp =
4717      Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4718  NewFrexp->copyIRFlags(FrexpCall);
4719
4720  Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4721
      // Constant-fold frexp of the constant arm at compile time; only the
      // mantissa (result 0) is needed here.
4722  int Exp;
4723  APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4724
4725  Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4726
      // Rebuild the select over the two mantissas, preserving arm order and
      // the original select's fast-math flags.
4727  Value *NewSel = Builder.CreateSelectFMF(
4728      Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4729      ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4730  return NewSel;
4731}
      // Body of visitExtractValueInst: simplify extractvalue by folding
      // through insertvalue chains, overflow intrinsics, loads, phis and
      // selects. Returns a replacement instruction or nullptr.
4733  Value *Agg = EV.getAggregateOperand();
4734
      // An extract with no indices is just a copy of the aggregate.
4735  if (!EV.hasIndices())
4736    return replaceInstUsesWith(EV, Agg);
4737
4738  if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4739                                          SQ.getWithInstruction(&EV)))
4740    return replaceInstUsesWith(EV, V);
4741
      // extractvalue (frexp (select C, X, Y)), 0
      //   --> select C, (frexp X).0, (frexp Y).0  (one arm constant-folded).
4742  Value *Cond, *TrueVal, *FalseVal;
4744                             m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4745    auto *SelInst =
4746        cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4747    if (Value *Result =
4748            foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4749      return replaceInstUsesWith(EV, Result);
4750  }
4752    // We're extracting from an insertvalue instruction, compare the indices
4753    const unsigned *exti, *exte, *insi, *inse;
4754    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4755         exte = EV.idx_end(), inse = IV->idx_end();
4756         exti != exte && insi != inse;
4757         ++exti, ++insi) {
4758      if (*insi != *exti)
4759        // The insert and extract both reference distinctly different elements.
4760        // This means the extract is not influenced by the insert, and we can
4761        // replace the aggregate operand of the extract with the aggregate
4762        // operand of the insert. i.e., replace
4763        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4764        // %E = extractvalue { i32, { i32 } } %I, 0
4765        // with
4766        // %E = extractvalue { i32, { i32 } } %A, 0
4767        return ExtractValueInst::Create(IV->getAggregateOperand(),
4768                                        EV.getIndices());
4769    }
4770    if (exti == exte && insi == inse)
4771      // Both iterators are at the end: Index lists are identical. Replace
4772      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4773      // %C = extractvalue { i32, { i32 } } %B, 1, 0
4774      // with "i32 42"
4775      return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4776    if (exti == exte) {
4777      // The extract list is a prefix of the insert list. i.e. replace
4778      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4779      // %E = extractvalue { i32, { i32 } } %I, 1
4780      // with
4781      // %X = extractvalue { i32, { i32 } } %A, 1
4782      // %E = insertvalue { i32 } %X, i32 42, 0
4783      // by switching the order of the insert and extract (though the
4784      // insertvalue should be left in, since it may have other uses).
4785      Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4786                                                EV.getIndices());
4787      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4788                                     ArrayRef(insi, inse));
4789    }
4790    if (insi == inse)
4791      // The insert list is a prefix of the extract list
4792      // We can simply remove the common indices from the extract and make it
4793      // operate on the inserted value instead of the insertvalue result.
4794      // i.e., replace
4795      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4796      // %E = extractvalue { i32, { i32 } } %I, 1, 0
4797      // with
4798      // %E extractvalue { i32 } { i32 42 }, 0
4799      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4800                                      ArrayRef(exti, exte));
4801  }
4802
4803  if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4804    return R;
4805
4806  if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4807    // Bail out if the aggregate contains scalable vector type
4808    if (auto *STy = dyn_cast<StructType>(Agg->getType());
4809        STy && STy->isScalableTy())
4810      return nullptr;
4811
4812    // If the (non-volatile) load only has one use, we can rewrite this to a
4813    // load from a GEP. This reduces the size of the load. If a load is used
4814    // only by extractvalue instructions then this either must have been
4815    // optimized before, or it is a struct with padding, in which case we
4816    // don't want to do the transformation as it loses padding knowledge.
4817    if (L->isSimple() && L->hasOneUse()) {
4818      // extractvalue has integer indices, getelementptr has Value*s. Convert.
4819      SmallVector<Value*, 4> Indices;
4820      // Prefix an i32 0 since we need the first element.
4821      Indices.push_back(Builder.getInt32(0));
4822      for (unsigned Idx : EV.indices())
4823        Indices.push_back(Builder.getInt32(Idx));
4824
4825      // We need to insert these at the location of the old load, not at that of
4826      // the extractvalue.
4827      Builder.SetInsertPoint(L);
4828      Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4829                                             L->getPointerOperand(), Indices);
4830      Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4831      // Whatever aliasing information we had for the orignal load must also
4832      // hold for the smaller load, so propagate the annotations.
4833      NL->setAAMetadata(L->getAAMetadata());
4834      // Returning the load directly will cause the main loop to insert it in
4835      // the wrong spot, so use replaceInstUsesWith().
4836      return replaceInstUsesWith(EV, NL);
4837    }
4838  }
4839
      // extract (phi ...) --> phi (extract ...), when profitable.
4840  if (auto *PN = dyn_cast<PHINode>(Agg))
4841    if (Instruction *Res = foldOpIntoPhi(EV, PN))
4842      return Res;
4843
4844  // Canonicalize extract (select Cond, TV, FV)
4845  // -> select cond, (extract TV), (extract FV)
4846  if (auto *SI = dyn_cast<SelectInst>(Agg))
4847    if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4848      return R;
4849
4850  // We could simplify extracts from other values. Note that nested extracts may
4851  // already be simplified implicitly by the above: extract (extract (insert) )
4852  // will be translated into extract ( insert ( extract ) ) first and then just
4853  // the value inserted, if appropriate. Similarly for extracts from single-use
4854  // loads: extract (extract (load)) will be translated to extract (load (gep))
4855  // and if again single-use then via load (gep (gep)) to load (gep).
4856  // However, double extracts from e.g. function arguments or return values
4857  // aren't handled yet.
4858  return nullptr;
4859}
4860
4861/// Return 'true' if the given typeinfo will match anything.
4861/// Used when simplifying landingpad clauses: a catch-all clause makes any
4861/// later clause (and the cleanup flag) redundant. The answer depends on the
4861/// EH personality in use.
4862static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4863  switch (Personality) {
4867    // The GCC C EH and Rust personality only exists to support cleanups, so
4868    // it's not clear what the semantics of catch clauses are.
4869    return false;
4871    return false;
4873    // While __gnat_all_others_value will match any Ada exception, it doesn't
4874    // match foreign exceptions (or didn't, before gcc-4.7).
4875    return false;
      // For the remaining personalities, a null typeinfo is the catch-all.
4886    return TypeInfo->isNullValue();
4887  }
4888  llvm_unreachable("invalid enum");
4889}
4890
4891static bool shorter_filter(const Value *LHS, const Value *RHS) {
4892 return
4893 cast<ArrayType>(LHS->getType())->getNumElements()
4894 <
4895 cast<ArrayType>(RHS->getType())->getNumElements();
4896}
4897
      // Body of visitLandingPadInst: simplify the clause list (dedupe catches,
      // prune and dedupe filter elements, drop clauses shadowed by catch-alls,
      // sort runs of filters by length, remove filters subsumed by earlier
      // ones) and recompute the cleanup flag. Returns a new landingpad, the
      // modified instruction, or nullptr if nothing changed.
4899  // The logic here should be correct for any real-world personality function.
4900  // However if that turns out not to be true, the offending logic can always
4901  // be conditioned on the personality function, like the catch-all logic is.
4902  EHPersonality Personality =
4903      classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4904
4905  // Simplify the list of clauses, eg by removing repeated catch clauses
4906  // (these are often created by inlining).
4907  bool MakeNewInstruction = false; // If true, recreate using the following:
4908  SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4909  bool CleanupFlag = LI.isCleanup();      // - The new instruction is a cleanup.
4910
      // Pass 1: walk the existing clauses building the simplified list.
4911  SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4912  for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4913    bool isLastClause = i + 1 == e;
4914    if (LI.isCatch(i)) {
4915      // A catch clause.
4916      Constant *CatchClause = LI.getClause(i);
4917      Constant *TypeInfo = CatchClause->stripPointerCasts();
4918
4919      // If we already saw this clause, there is no point in having a second
4920      // copy of it.
4921      if (AlreadyCaught.insert(TypeInfo).second) {
4922        // This catch clause was not already seen.
4923        NewClauses.push_back(CatchClause);
4924      } else {
4925        // Repeated catch clause - drop the redundant copy.
4926        MakeNewInstruction = true;
4927      }
4928
4929      // If this is a catch-all then there is no point in keeping any following
4930      // clauses or marking the landingpad as having a cleanup.
4931      if (isCatchAll(Personality, TypeInfo)) {
4932        if (!isLastClause)
4933          MakeNewInstruction = true;
4934        CleanupFlag = false;
4935        break;
4936      }
4937    } else {
4938      // A filter clause.  If any of the filter elements were already caught
4939      // then they can be dropped from the filter.  It is tempting to try to
4940      // exploit the filter further by saying that any typeinfo that does not
4941      // occur in the filter can't be caught later (and thus can be dropped).
4942      // However this would be wrong, since typeinfos can match without being
4943      // equal (for example if one represents a C++ class, and the other some
4944      // class derived from it).
4945      assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4946      Constant *FilterClause = LI.getClause(i);
4947      ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4948      unsigned NumTypeInfos = FilterType->getNumElements();
4949
4950      // An empty filter catches everything, so there is no point in keeping any
4951      // following clauses or marking the landingpad as having a cleanup.  By
4952      // dealing with this case here the following code is made a bit simpler.
4953      if (!NumTypeInfos) {
4954        NewClauses.push_back(FilterClause);
4955        if (!isLastClause)
4956          MakeNewInstruction = true;
4957        CleanupFlag = false;
4958        break;
4959      }
4960
4961      bool MakeNewFilter = false; // If true, make a new filter.
4962      SmallVector<Constant *, 16> NewFilterElts; // New elements.
4963      if (isa<ConstantAggregateZero>(FilterClause)) {
4964        // Not an empty filter - it contains at least one null typeinfo.
4965        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4966        Constant *TypeInfo =
4968        // If this typeinfo is a catch-all then the filter can never match.
4969        if (isCatchAll(Personality, TypeInfo)) {
4970          // Throw the filter away.
4971          MakeNewInstruction = true;
4972          continue;
4973        }
4974
4975        // There is no point in having multiple copies of this typeinfo, so
4976        // discard all but the first copy if there is more than one.
4977        NewFilterElts.push_back(TypeInfo);
4978        if (NumTypeInfos > 1)
4979          MakeNewFilter = true;
4980      } else {
4981        ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4982        SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4983        NewFilterElts.reserve(NumTypeInfos);
4984
4985        // Remove any filter elements that were already caught or that already
4986        // occurred in the filter.  While there, see if any of the elements are
4987        // catch-alls.  If so, the filter can be discarded.
4988        bool SawCatchAll = false;
4989        for (unsigned j = 0; j != NumTypeInfos; ++j) {
4990          Constant *Elt = Filter->getOperand(j);
4991          Constant *TypeInfo = Elt->stripPointerCasts();
4992          if (isCatchAll(Personality, TypeInfo)) {
4993            // This element is a catch-all.  Bail out, noting this fact.
4994            SawCatchAll = true;
4995            break;
4996          }
4997
4998          // Even if we've seen a type in a catch clause, we don't want to
4999          // remove it from the filter.  An unexpected type handler may be
5000          // set up for a call site which throws an exception of the same
5001          // type caught.  In order for the exception thrown by the unexpected
5002          // handler to propagate correctly, the filter must be correctly
5003          // described for the call site.
5004          //
5005          // Example:
5006          //
5007          // void unexpected() { throw 1;}
5008          // void foo() throw (int) {
5009          //   std::set_unexpected(unexpected);
5010          //   try {
5011          //     throw 2.0;
5012          //   } catch (int i) {}
5013          // }
5014
5015          // There is no point in having multiple copies of the same typeinfo in
5016          // a filter, so only add it if we didn't already.
5017          if (SeenInFilter.insert(TypeInfo).second)
5018            NewFilterElts.push_back(cast<Constant>(Elt));
5019        }
5020        // A filter containing a catch-all cannot match anything by definition.
5021        if (SawCatchAll) {
5022          // Throw the filter away.
5023          MakeNewInstruction = true;
5024          continue;
5025        }
5026
5027        // If we dropped something from the filter, make a new one.
5028        if (NewFilterElts.size() < NumTypeInfos)
5029          MakeNewFilter = true;
5030      }
5031      if (MakeNewFilter) {
5032        FilterType = ArrayType::get(FilterType->getElementType(),
5033                                    NewFilterElts.size());
5034        FilterClause = ConstantArray::get(FilterType, NewFilterElts);
5035        MakeNewInstruction = true;
5036      }
5037
5038      NewClauses.push_back(FilterClause);
5039
5040      // If the new filter is empty then it will catch everything so there is
5041      // no point in keeping any following clauses or marking the landingpad
5042      // as having a cleanup.  The case of the original filter being empty was
5043      // already handled above.
5044      if (MakeNewFilter && !NewFilterElts.size()) {
5045        assert(MakeNewInstruction && "New filter but not a new instruction!");
5046        CleanupFlag = false;
5047        break;
5048      }
5049    }
5050  }
5051
5052  // If several filters occur in a row then reorder them so that the shortest
5053  // filters come first (those with the smallest number of elements).  This is
5054  // advantageous because shorter filters are more likely to match, speeding up
5055  // unwinding, but mostly because it increases the effectiveness of the other
5056  // filter optimizations below.
5057  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
5058    unsigned j;
5059    // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
5060    for (j = i; j != e; ++j)
5061      if (!isa<ArrayType>(NewClauses[j]->getType()))
5062        break;
5063
5064    // Check whether the filters are already sorted by length.  We need to know
5065    // if sorting them is actually going to do anything so that we only make a
5066    // new landingpad instruction if it does.
5067    for (unsigned k = i; k + 1 < j; ++k)
5068      if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
5069        // Not sorted, so sort the filters now.  Doing an unstable sort would be
5070        // correct too but reordering filters pointlessly might confuse users.
5071        std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
5073        MakeNewInstruction = true;
5074        break;
5075      }
5076
5077    // Look for the next batch of filters.
5078    i = j + 1;
5079  }
5080
5081  // If typeinfos matched if and only if equal, then the elements of a filter L
5082  // that occurs later than a filter F could be replaced by the intersection of
5083  // the elements of F and L.  In reality two typeinfos can match without being
5084  // equal (for example if one represents a C++ class, and the other some class
5085  // derived from it) so it would be wrong to perform this transform in general.
5086  // However the transform is correct and useful if F is a subset of L.  In that
5087  // case L can be replaced by F, and thus removed altogether since repeating a
5088  // filter is pointless.  So here we look at all pairs of filters F and L where
5089  // L follows F in the list of clauses, and remove L if every element of F is
5090  // an element of L.  This can occur when inlining C++ functions with exception
5091  // specifications.
5092  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
5093    // Examine each filter in turn.
5094    Value *Filter = NewClauses[i];
5095    ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
5096    if (!FTy)
5097      // Not a filter - skip it.
5098      continue;
5099    unsigned FElts = FTy->getNumElements();
5100    // Examine each filter following this one.  Doing this backwards means that
5101    // we don't have to worry about filters disappearing under us when removed.
5102    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
5103      Value *LFilter = NewClauses[j];
5104      ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
5105      if (!LTy)
5106        // Not a filter - skip it.
5107        continue;
5108      // If Filter is a subset of LFilter, i.e. every element of Filter is also
5109      // an element of LFilter, then discard LFilter.
5110      SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
5111      // If Filter is empty then it is a subset of LFilter.
5112      if (!FElts) {
5113        // Discard LFilter.
5114        NewClauses.erase(J);
5115        MakeNewInstruction = true;
5116        // Move on to the next filter.
5117        continue;
5118      }
5119      unsigned LElts = LTy->getNumElements();
5120      // If Filter is longer than LFilter then it cannot be a subset of it.
5121      if (FElts > LElts)
5122        // Move on to the next filter.
5123        continue;
5124      // At this point we know that LFilter has at least one element.
5125      if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
5126        // Filter is a subset of LFilter iff Filter contains only zeros (as we
5127        // already know that Filter is not longer than LFilter).
5129          assert(FElts <= LElts && "Should have handled this case earlier!");
5130          // Discard LFilter.
5131          NewClauses.erase(J);
5132          MakeNewInstruction = true;
5133        }
5134        // Move on to the next filter.
5135        continue;
5136      }
5137      ConstantArray *LArray = cast<ConstantArray>(LFilter);
5138      if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
5139        // Since Filter is non-empty and contains only zeros, it is a subset of
5140        // LFilter iff LFilter contains a zero.
5141        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
5142        for (unsigned l = 0; l != LElts; ++l)
5143          if (LArray->getOperand(l)->isNullValue()) {
5144            // LFilter contains a zero - discard it.
5145            NewClauses.erase(J);
5146            MakeNewInstruction = true;
5147            break;
5148          }
5149        // Move on to the next filter.
5150        continue;
5151      }
5152      // At this point we know that both filters are ConstantArrays.  Loop over
5153      // operands to see whether every element of Filter is also an element of
5154      // LFilter.  Since filters tend to be short this is probably faster than
5155      // using a method that scales nicely.
5157      bool AllFound = true;
5158      for (unsigned f = 0; f != FElts; ++f) {
5159        Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
5160        AllFound = false;
5161        for (unsigned l = 0; l != LElts; ++l) {
5162          Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
5163          if (LTypeInfo == FTypeInfo) {
5164            AllFound = true;
5165            break;
5166          }
5167        }
5168        if (!AllFound)
5169          break;
5170      }
5171      if (AllFound) {
5172        // Discard LFilter.
5173        NewClauses.erase(J);
5174        MakeNewInstruction = true;
5175      }
5176      // Move on to the next filter.
5177    }
5178  }
5179
5180  // If we changed any of the clauses, replace the old landingpad instruction
5181  // with a new one.
5182  if (MakeNewInstruction) {
5184                                                 NewClauses.size());
5185    for (Constant *C : NewClauses)
5186      NLI->addClause(C);
5187    // A landing pad with no clauses must have the cleanup flag set.  It is
5188    // theoretically possible, though highly unlikely, that we eliminated all
5189    // clauses.  If so, force the cleanup flag to true.
5190    if (NewClauses.empty())
5191      CleanupFlag = true;
5192    NLI->setCleanup(CleanupFlag);
5193    return NLI;
5194  }
5195
5196  // Even if none of the clauses changed, we may nonetheless have understood
5197  // that the cleanup flag is pointless.  Clear it if so.
5198  if (LI.isCleanup() != CleanupFlag) {
5199    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
5200    LI.setCleanup(CleanupFlag);
5201    return &LI;
5202  }
5203
5204  return nullptr;
5205}
5206
5207Value *
5209  // Try to push freeze through instructions that propagate but don't produce
5210  // poison as far as possible.  If an operand of freeze follows three
5211  // conditions 1) one-use, 2) does not produce poison, and 3) has all but one
5212  // guaranteed-non-poison operands then push the freeze through to the one
5213  // operand that is not guaranteed non-poison.  The actual transform is as
5214  // follows.
5215  //   Op1 = ...                        ; Op1 can be posion
5216  //   Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and only have
5217  //                                    ; single guaranteed-non-poison operands
5218  //   ... = Freeze(Op0)
5219  // =>
5220  //   Op1 = ...
5221  //   Op1.fr = Freeze(Op1)
5222  //   ... = Inst(Op1.fr, NonPoisonOps...)
5223  auto *OrigOp = OrigFI.getOperand(0);
5224  auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
5225
5226  // While we could change the other users of OrigOp to use freeze(OrigOp), that
5227  // potentially reduces their optimization potential, so let's only do this iff
5228  // the OrigOp is only used by the freeze.
5229  if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
5230    return nullptr;
5231
5232  // We can't push the freeze through an instruction which can itself create
5233  // poison.  If the only source of new poison is flags, we can simply
5234  // strip them (since we know the only use is the freeze and nothing can
5235  // benefit from them.)
5237                             /*ConsiderFlagsAndMetadata*/ false))
5238    return nullptr;
5239
5240  // If operand is guaranteed not to be poison, there is no need to add freeze
5241  // to the operand. So we first find the operand that is not guaranteed to be
5242  // poison.
5243  Value *MaybePoisonOperand = nullptr;
5244  for (Value *V : OrigOpInst->operands()) {
5246        // Treat identical operands as a single operand.
5247        (MaybePoisonOperand && MaybePoisonOperand == V))
5248      continue;
5249    if (!MaybePoisonOperand)
5250      MaybePoisonOperand = V;
5251    else
          // More than one possibly-poison operand: pushing the freeze would
          // require multiple freezes, so give up.
5252      return nullptr;
5253  }
5254
      // Flags (nuw/nsw/exact/...) could introduce poison; drop them since the
      // sole user was the freeze.
5255  OrigOpInst->dropPoisonGeneratingAnnotations();
5256
5257  // If all operands are guaranteed to be non-poison, we can drop freeze.
5258  if (!MaybePoisonOperand)
5259    return OrigOp;
5260
5261  Builder.SetInsertPoint(OrigOpInst);
5262  Value *FrozenMaybePoisonOperand = Builder.CreateFreeze(
5263      MaybePoisonOperand, MaybePoisonOperand->getName() + ".fr");
5264
5265  OrigOpInst->replaceUsesOfWith(MaybePoisonOperand, FrozenMaybePoisonOperand);
5266  return OrigOp;
5267}
5268
5270                                                            PHINode *PN) {
5271  // Detect whether this is a recurrence with a start value and some number of
5272  // backedge values. We'll check whether we can push the freeze through the
5273  // backedge values (possibly dropping poison flags along the way) until we
5274  // reach the phi again. In that case, we can move the freeze to the start
5275  // value.
5276  Use *StartU = nullptr;
5278  for (Use &U : PN->incoming_values()) {
5279    if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5280      // Add backedge value to worklist.
5281      Worklist.push_back(U.get());
5282      continue;
5283    }
5284
5285    // Don't bother handling multiple start values.
5286    if (StartU)
5287      return nullptr;
5288    StartU = &U;
5289  }
5290
5291  if (!StartU || Worklist.empty())
5292    return nullptr; // Not a recurrence.
5293
5294  Value *StartV = StartU->get();
5295  BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5296  bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5297  // We can't insert freeze if the start value is the result of the
5298  // terminator (e.g. an invoke).
5299  if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5300    return nullptr;
5301
      // Walk the backedge values transitively; each must neither create
      // poison itself (flags aside) nor escape the recurrence.
5304  while (!Worklist.empty()) {
5305    Value *V = Worklist.pop_back_val();
5306    if (!Visited.insert(V).second)
5307      continue;
5308
5309    if (Visited.size() > 32)
5310      return nullptr; // Limit the total number of values we inspect.
5311
5312    // Assume that PN is non-poison, because it will be after the transform.
5313    if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5314      continue;
5315
5318                               /*ConsiderFlagsAndMetadata*/ false))
5319      return nullptr;
5320
5321    DropFlags.push_back(I);
5322    append_range(Worklist, I->operands());
5323  }
5324
      // Now that the transform is known to apply, strip poison-generating
      // flags along the recurrence and freeze only the start value.
5325  for (Instruction *I : DropFlags)
5326    I->dropPoisonGeneratingAnnotations();
5327
5328  if (StartNeedsFreeze) {
5329    Builder.SetInsertPoint(StartBB->getTerminator());
5330    Value *FrozenStartV = Builder.CreateFreeze(StartV,
5331                                               StartV->getName() + ".fr");
5332    replaceUse(*StartU, FrozenStartV);
5333  }
5334  return replaceInstUsesWith(FI, PN);
5335}
5336
      // Body of freezeOtherUses: hoist the freeze to just after its operand's
      // definition and redirect every dominated use of the operand through the
      // freeze. Returns true if anything changed.
5338  Value *Op = FI.getOperand(0);
5339
      // Constants never need freezing at other uses; a single-use operand has
      // no other uses to redirect.
5340  if (isa<Constant>(Op) || Op->hasOneUse())
5341    return false;
5342
5343  // Move the freeze directly after the definition of its operand, so that
5344  // it dominates the maximum number of uses. Note that it may not dominate
5345  // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5346  // the normal/default destination. This is why the domination check in the
5347  // replacement below is still necessary.
5348  BasicBlock::iterator MoveBefore;
5349  if (isa<Argument>(Op)) {
5350    MoveBefore =
5352  } else {
5353    auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5354    if (!MoveBeforeOpt)
5355      return false;
5356    MoveBefore = *MoveBeforeOpt;
5357  }
5358
5359  // Re-point iterator to come after any debug-info records.
5360  MoveBefore.setHeadBit(false);
5361
5362  bool Changed = false;
5363  if (&FI != &*MoveBefore) {
5364    FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5365    Changed = true;
5366  }
5367
      // Redirect only the uses the (possibly moved) freeze dominates.
5368  Changed |= Op->replaceUsesWithIf(
5369      &FI, [&](Use &U) -> bool { return DT.dominates(&FI, U); });
5370
5371  return Changed;
5372}
5373
5374// Check if any direct or bitcast user of this value is a shuffle instruction.
5374// Recurses through bitcast users only; other indirect uses are not followed.
5376  for (auto *U : V->users()) {
5378      return true;
5379    else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5380      return true;
5381  }
5382  return false;
5383}
5384
// Fold and canonicalize a freeze instruction. Returns a replacement
// instruction, &I when only other uses were updated, or nullptr if nothing
// was folded.
5386  Value *Op0 = I.getOperand(0);
5387
  // First try the generic simplifier (e.g. the operand is already known to
  // be neither undef nor poison).
5388  if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5389    return replaceInstUsesWith(I, V);
5390
5391  // freeze (phi const, x) --> phi const, (freeze x)
5392  if (auto *PN = dyn_cast<PHINode>(Op0)) {
5393    if (Instruction *NV = foldOpIntoPhi(I, PN))
5394      return NV;
5395    if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5396      return NV;
5397  }
5398
5400    return replaceInstUsesWith(I, NI);
5401
5402  // If I is freeze(undef), check its uses and fold it to a fixed constant.
5403  // - or: pick -1
5404  // - select's condition: if the true value is constant, choose it by making
5405  //                       the condition true.
5406  // - phi: pick the common constant across operands
5407  // - default: pick 0
5408  //
5409  // Note that this transform is intentionally done here rather than
5410  // via an analysis in InstSimplify or at individual user sites. That is
5411  // because we must produce the same value for all uses of the freeze -
5412  // it's the reason "freeze" exists!
5413  //
5414  // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5415  // duplicating logic for binops at least.
5416  auto getUndefReplacement = [&](Type *Ty) {
5417    auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
5418      // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
5419      // removed.
5420      Constant *BestValue = nullptr;
5421      for (Value *V : PN.incoming_values()) {
5422        if (match(V, m_Freeze(m_Undef())))
5423          continue;
5424
5426        if (!C)
5427          return nullptr;
5428
5430          return nullptr;
5431
        // All remaining incoming values must agree on a single constant.
5432        if (BestValue && BestValue != C)
5433          return nullptr;
5434
5435        BestValue = C;
5436      }
5437      return BestValue;
5438    };
5439
5440    Value *NullValue = Constant::getNullValue(Ty);
5441    Value *BestValue = nullptr;
5442    for (auto *U : I.users()) {
      // Default candidate for this user is zero; refined per user kind below.
5443      Value *V = NullValue;
5444      if (match(U, m_Or(m_Value(), m_Value())))
5446      else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5447        V = ConstantInt::getTrue(Ty);
5448      else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
        // Don't pick the select's other arm if it is the freeze itself or
        // could itself be undef/poison.
5449        if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5450          V = NullValue;
5451      } else if (auto *PHI = dyn_cast<PHINode>(U)) {
5452        if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
5453          V = MaybeV;
5454      }
5455
      // Every user must receive the same value; on conflict fall back to 0.
5456      if (!BestValue)
5457        BestValue = V;
5458      else if (BestValue != V)
5459        BestValue = NullValue;
5460    }
5461    assert(BestValue && "Must have at least one use");
5462    assert(BestValue != &I && "Cannot replace with itself");
5463    return BestValue;
5464  };
5465
5466  if (match(Op0, m_Undef())) {
5467    // Don't fold freeze(undef/poison) if it's used as a vector operand in
5468    // a shuffle. This may improve codegen for shuffles that allow
5469    // unspecified inputs.
5471      return nullptr;
5472    return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5473  }
5474
  // For a fixed-width constant vector with some undef/poison lanes, pick the
  // first defined lane's value (or zero if every lane is undef) and splat it
  // into the undef lanes.
5475  auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5476    Type *Ty = C->getType();
5477    auto *VTy = dyn_cast<FixedVectorType>(Ty);
5478    if (!VTy)
5479      return nullptr;
5480    unsigned NumElts = VTy->getNumElements();
5481    Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5482    for (unsigned i = 0; i != NumElts; ++i) {
5483      Constant *EltC = C->getAggregateElement(i);
5484      if (EltC && !match(EltC, m_Undef())) {
5485        BestValue = EltC;
5486        break;
5487      }
5488    }
5489    return Constant::replaceUndefsWith(C, BestValue);
5490  };
5491
5492  Constant *C;
5493  if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5494      !C->containsConstantExpression()) {
5495    if (Constant *Repl = getFreezeVectorReplacement(C))
5496      return replaceInstUsesWith(I, Repl);
5497  }
5498
5499  // Replace uses of Op with freeze(Op).
5500  if (freezeOtherUses(I))
5501    return &I;
5502
5503  return nullptr;
5504}
5505
5506/// Check for case where the call writes to an otherwise dead alloca.  This
5507/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5508/// helper *only* analyzes the write; doesn't check any other legality aspect.
5510  auto *CB = dyn_cast<CallBase>(I);
5511  if (!CB)
5512    // TODO: handle e.g. store to alloca here - only worth doing if we extend
5513    // to allow reload along used path as described below.  Otherwise, this
5514    // is simply a store to a dead allocation which will be removed.
5515    return false;
5516  std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5517  if (!Dest)
5518    return false;
5519  auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5520  if (!AI)
5521    // TODO: allow malloc?
5522    return false;
5523  // TODO: allow memory access dominated by move point?  Note that since AI
5524  // could have a reference to itself captured by the call, we would need to
5525  // account for cycles in doing so.
5526  SmallVector<const User *> AllocaUsers;
  // Worklist walk over the alloca's transitive users; de-duplicated so
  // diamond-shaped use graphs are visited once.
5528  auto pushUsers = [&](const Instruction &I) {
5529    for (const User *U : I.users()) {
5530      if (Visited.insert(U).second)
5531        AllocaUsers.push_back(U);
5532    }
5533  };
5534  pushUsers(*AI);
5535  while (!AllocaUsers.empty()) {
5536    auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
    // Address computations are transparent; keep walking through them.
5537    if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5538      pushUsers(*UserI);
5539      continue;
5540    }
    // The call being analyzed is the one permitted writer.
5541    if (UserI == CB)
5542      continue;
5543    // TODO: support lifetime.start/end here
5544    return false;
5545  }
5546  return true;
5547}
5548
5549/// Try to move the specified instruction from its current block into the
5550/// beginning of DestBlock, which can only happen if it's safe to move the
5551/// instruction past all of the instructions between it and the end of its
5552/// block.
5554                                 BasicBlock *DestBlock) {
5555  BasicBlock *SrcBlock = I->getParent();
5556
5557  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5558  if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5559      I->isTerminator())
5560    return false;
5561
5562  // Do not sink static or dynamic alloca instructions. Static allocas must
5563  // remain in the entry block, and dynamic allocas must not be sunk in between
5564  // a stacksave / stackrestore pair, which would incorrectly shorten its
5565  // lifetime.
5566  if (isa<AllocaInst>(I))
5567    return false;
5568
5569  // Do not sink into catchswitch blocks.
5570  if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5571    return false;
5572
5573  // Do not sink convergent call instructions.
5574  if (auto *CI = dyn_cast<CallInst>(I)) {
5575    if (CI->isConvergent())
5576      return false;
5577  }
5578
5579  // Unless we can prove that the memory write isn't visibile except on the
5580  // path we're sinking to, we must bail.
5581  if (I->mayWriteToMemory()) {
5582    if (!SoleWriteToDeadLocal(I, TLI))
5583      return false;
5584  }
5585
5586  // We can only sink load instructions if there is nothing between the load and
5587  // the end of block that could change the value.
5588  if (I->mayReadFromMemory() &&
5589      !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5590    // We don't want to do any sophisticated alias analysis, so we only check
5591    // the instructions after I in I's parent block if we try to sink to its
5592    // successor block.
5593    if (DestBlock->getUniquePredecessor() != I->getParent())
5594      return false;
5595    for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5596                              E = I->getParent()->end();
5597         Scan != E; ++Scan)
5598      if (Scan->mayWriteToMemory())
5599        return false;
5600  }
5601
  // Drop droppable uses whose user lives outside the destination block, and
  // queue those users for revisiting.
5602  I->dropDroppableUses([&](const Use *U) {
5603    auto *I = dyn_cast<Instruction>(U->getUser());
5604    if (I && I->getParent() != DestBlock) {
5605      Worklist.add(I);
5606      return true;
5607    }
5608    return false;
5609  });
5610  /// FIXME: We could remove droppable uses that are not dominated by
5611  /// the new position.
5612
5613  BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5614  I->moveBefore(*DestBlock, InsertPos);
5615  ++NumSunkInst;
5616
5617  // Also sink all related debug uses from the source basic block. Otherwise we
5618  // get debug use before the def. Attempt to salvage debug uses first, to
5619  // maximise the range variables have location for. If we cannot salvage, then
5620  // mark the location undef: we know it was supposed to receive a new location
5621  // here, but that computation has been sunk.
5622  SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5623  findDbgUsers(I, DbgVariableRecords);
5624  if (!DbgVariableRecords.empty())
5625    tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5626                                           DbgVariableRecords);
5627
5628  // PS: there are numerous flaws with this behaviour, not least that right now
5629  // assignments can be re-ordered past other assignments to the same variable
5630  // if they use different Values. Creating more undef assignements can never be
5631  // undone. And salvaging all users outside of this block can un-necessarily
5632  // alter the lifetime of the live-value that the variable refers to.
5633  // Some of these things can be resolved by tolerating debug use-before-defs in
5634  // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5635  // being used for more architectures.
5636
5637  return true;
5638}
5639
5641    Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5642    BasicBlock *DestBlock,
5643    SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
  // Sink debug records that refer to the just-sunk instruction I so they
  // follow it into DestBlock; records that cannot be moved are salvaged.
5644  // For all debug values in the destination block, the sunk instruction
5645  // will still be available, so they do not need to be dropped.
5646
5647  // Fetch all DbgVariableRecords not already in the destination.
5648  SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5649  for (auto &DVR : DbgVariableRecords)
5650    if (DVR->getParent() != DestBlock)
5651      DbgVariableRecordsToSalvage.push_back(DVR);
5652
5653  // Fetch a second collection, of DbgVariableRecords in the source block that
5654  // we're going to sink.
5655  SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5656  for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5657    if (DVR->getParent() == SrcBlock)
5658      DbgVariableRecordsToSink.push_back(DVR);
5659
5660  // Sort DbgVariableRecords according to their position in the block. This is a
5661  // partial order: DbgVariableRecords attached to different instructions will
5662  // be ordered by the instruction order, but DbgVariableRecords attached to the
5663  // same instruction won't have an order.
5664  auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5665    return B->getInstruction()->comesBefore(A->getInstruction());
5666  };
  // Note: the comparator above yields *reverse* program order, so the first
  // element of the sorted vector is the latest record.
5667  llvm::stable_sort(DbgVariableRecordsToSink, Order);
5668
5669  // If there are two assignments to the same variable attached to the same
5670  // instruction, the ordering between the two assignments is important. Scan
5671  // for this (rare) case and establish which is the last assignment.
5672  using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5674  if (DbgVariableRecordsToSink.size() > 1) {
5676    // Count how many assignments to each variable there is per instruction.
5677    for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5678      DebugVariable DbgUserVariable =
5679          DebugVariable(DVR->getVariable(), DVR->getExpression(),
5680                        DVR->getDebugLoc()->getInlinedAt());
5681      CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5682    }
5683
5684    // If there are any instructions with two assignments, add them to the
5685    // FilterOutMap to record that they need extra filtering.
5687    for (auto It : CountMap) {
5688      if (It.second > 1) {
5689        FilterOutMap[It.first] = nullptr;
5690        DupSet.insert(It.first.first);
5691      }
5692    }
5693
5694    // For all instruction/variable pairs needing extra filtering, find the
5695    // latest assignment.
5696    for (const Instruction *Inst : DupSet) {
5697      for (DbgVariableRecord &DVR :
5698           llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5699        DebugVariable DbgUserVariable =
5700            DebugVariable(DVR.getVariable(), DVR.getExpression(),
5701                          DVR.getDebugLoc()->getInlinedAt());
5702        auto FilterIt =
5703            FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5704        if (FilterIt == FilterOutMap.end())
5705          continue;
5706        if (FilterIt->second != nullptr)
5707          continue;
        // First record seen while iterating in reverse is the last
        // assignment; remember it as the survivor.
5708        FilterIt->second = &DVR;
5709      }
5710    }
5711  }
5712
5713  // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5714  // out any duplicate assignments identified above.
5716  SmallSet<DebugVariable, 4> SunkVariables;
5717  for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5719      continue;
5720
5721    DebugVariable DbgUserVariable =
5722        DebugVariable(DVR->getVariable(), DVR->getExpression(),
5723                      DVR->getDebugLoc()->getInlinedAt());
5724
5725    // For any variable where there were multiple assignments in the same place,
5726    // ignore all but the last assignment.
5727    if (!FilterOutMap.empty()) {
5728      InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5729      auto It = FilterOutMap.find(IVP);
5730
5731      // Filter out.
5732      if (It != FilterOutMap.end() && It->second != DVR)
5733        continue;
5734    }
5735
    // Only one record per variable is sunk; given the reverse sort above,
    // that is the latest one.
5736    if (!SunkVariables.insert(DbgUserVariable).second)
5737      continue;
5738
5739    if (DVR->isDbgAssign())
5740      continue;
5741
5742    DVRClones.emplace_back(DVR->clone());
5743    LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5744  }
5745
5746  // Perform salvaging without the clones, then sink the clones.
5747  if (DVRClones.empty())
5748    return;
5749
5750  salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5751
5752  // The clones are in reverse order of original appearance. Assert that the
5753  // head bit is set on the iterator as we _should_ have received it via
5754  // getFirstInsertionPt. Inserting like this will reverse the clone order as
5755  // we'll repeatedly insert at the head, such as:
5756  //   DVR-3 (third insertion goes here)
5757  //   DVR-2 (second insertion goes here)
5758  //   DVR-1 (first insertion goes here)
5759  //   Any-Prior-DVRs
5760  //   InsertPtInst
5761  assert(InsertPos.getHeadBit());
5762  for (DbgVariableRecord *DVRClone : DVRClones) {
5763    InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5764    LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5765  }
5766}
5767
// Main worklist-driven combine loop: pop instructions, optionally sink them
// toward their single use block, then apply the visit* folds. Returns true
// if the IR changed.
5769  while (!Worklist.isEmpty()) {
5770    // Walk deferred instructions in reverse order, and push them to the
5771    // worklist, which means they'll end up popped from the worklist in-order.
5772    while (Instruction *I = Worklist.popDeferred()) {
5773      // Check to see if we can DCE the instruction. We do this already here to
5774      // reduce the number of uses and thus allow other folds to trigger.
5775      // Note that eraseInstFromFunction() may push additional instructions on
5776      // the deferred worklist, so this will DCE whole instruction chains.
5779        ++NumDeadInst;
5780        continue;
5781      }
5782
5783      Worklist.push(I);
5784    }
5785
5786    Instruction *I = Worklist.removeOne();
5787    if (I == nullptr) continue;  // skip null values.
5788
5789    // Check to see if we can DCE the instruction.
5792      ++NumDeadInst;
5793      continue;
5794    }
5795
    // Debug counter hook: lets developers bisect which visit triggers a
    // miscompile.
5796    if (!DebugCounter::shouldExecute(VisitCounter))
5797      continue;
5798
5799    // See if we can trivially sink this instruction to its user if we can
5800    // prove that the successor is not executed more frequently than our block.
5801    // Return the UserBlock if successful.
5802    auto getOptionalSinkBlockForInst =
5803        [this](Instruction *I) -> std::optional<BasicBlock *> {
5804      if (!EnableCodeSinking)
5805        return std::nullopt;
5806
5807      BasicBlock *BB = I->getParent();
5808      BasicBlock *UserParent = nullptr;
5809      unsigned NumUsers = 0;
5810
5811      for (Use &U : I->uses()) {
5812        User *User = U.getUser();
5813        if (User->isDroppable()) {
5814          // Do not sink if there are dereferenceable assumes that would be
5815          // removed.
5817          if (II->getIntrinsicID() != Intrinsic::assume ||
5818              !II->getOperandBundle("dereferenceable"))
5819            continue;
5820        }
5821
5822        if (NumUsers > MaxSinkNumUsers)
5823          return std::nullopt;
5824
5825        Instruction *UserInst = cast<Instruction>(User);
5826        // Special handling for Phi nodes - get the block the use occurs in.
5827        BasicBlock *UserBB = UserInst->getParent();
5828        if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5829          UserBB = PN->getIncomingBlock(U);
5830        // Bail out if we have uses in different blocks. We don't do any
5831        // sophisticated analysis (i.e finding NearestCommonDominator of these
5832        // use blocks).
5833        if (UserParent && UserParent != UserBB)
5834          return std::nullopt;
5835        UserParent = UserBB;
5836
5837        // Make sure these checks are done only once, naturally we do the checks
5838        // the first time we get the userparent, this will save compile time.
5839        if (NumUsers == 0) {
5840          // Try sinking to another block. If that block is unreachable, then do
5841          // not bother. SimplifyCFG should handle it.
5842          if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5843            return std::nullopt;
5844
5845          auto *Term = UserParent->getTerminator();
5846          // See if the user is one of our successors that has only one
5847          // predecessor, so that we don't have to split the critical edge.
5848          // Another option where we can sink is a block that ends with a
5849          // terminator that does not pass control to other block (such as
5850          // return or unreachable or resume). In this case:
5851          //   - I dominates the User (by SSA form);
5852          //   - the User will be executed at most once.
5853          // So sinking I down to User is always profitable or neutral.
5854          if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5855            return std::nullopt;
5856
5857          assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5858        }
5859
5860        NumUsers++;
5861      }
5862
5863      // No user or only has droppable users.
5864      if (!UserParent)
5865        return std::nullopt;
5866
5867      return UserParent;
5868    };
5869
5870    auto OptBB = getOptionalSinkBlockForInst(I);
5871    if (OptBB) {
5872      auto *UserParent = *OptBB;
5873      // Okay, the CFG is simple enough, try to sink this instruction.
5874      if (tryToSinkInstruction(I, UserParent)) {
5875        LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5876        MadeIRChange = true;
5877        // We'll add uses of the sunk instruction below, but since
5878        // sinking can expose opportunities for it's *operands* add
5879        // them to the worklist
5880        for (Use &U : I->operands())
5881          if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5882            Worklist.push(OpI);
5883      }
5884    }
5885
5886    // Now that we have an instruction, try combining it to simplify it.
5887    Builder.SetInsertPoint(I);
5888    Builder.CollectMetadataToCopy(
5889        I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5890
5891#ifndef NDEBUG
5892    std::string OrigI;
5893#endif
5894    LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5895    LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5896
5897    if (Instruction *Result = visit(*I)) {
5898      ++NumCombined;
5899      // Should we replace the old instruction with a new one?
5900      if (Result != I) {
5901        LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5902                          << "    New = " << *Result << '\n');
5903
5904        // We copy the old instruction's DebugLoc to the new instruction, unless
5905        // InstCombine already assigned a DebugLoc to it, in which case we
5906        // should trust the more specifically selected DebugLoc.
5907        Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5908        // We also copy annotation metadata to the new instruction.
5909        Result->copyMetadata(*I, LLVMContext::MD_annotation);
5910        // Everything uses the new instruction now.
5911        I->replaceAllUsesWith(Result);
5912
5913        // Move the name to the new instruction first.
5914        Result->takeName(I);
5915
5916        // Insert the new instruction into the basic block...
5917        BasicBlock *InstParent = I->getParent();
5918        BasicBlock::iterator InsertPos = I->getIterator();
5919
5920        // Are we replace a PHI with something that isn't a PHI, or vice versa?
5921        if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5922          // We need to fix up the insertion point.
5923          if (isa<PHINode>(I)) // PHI -> Non-PHI
5924            InsertPos = InstParent->getFirstInsertionPt();
5925          else // Non-PHI -> PHI
5926            InsertPos = InstParent->getFirstNonPHIIt();
5927        }
5928
5929        Result->insertInto(InstParent, InsertPos);
5930
5931        // Push the new instruction and any users onto the worklist.
5932        Worklist.pushUsersToWorkList(*Result);
5933        Worklist.push(Result);
5934
5936      } else {
5937        LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5938                          << "    New = " << *I << '\n');
5939
5940        // If the instruction was modified, it's possible that it is now dead.
5941        // if so, remove it.
5944        } else {
5945          Worklist.pushUsersToWorkList(*I);
5946          Worklist.push(I);
5947        }
5948      }
5949      MadeIRChange = true;
5950    }
5951  }
5952
5953  Worklist.zap();
5954  return MadeIRChange;
5955}
5956
5957// Track the scopes used by !alias.scope and !noalias. In a function, a
5958// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5959// by both sets. If not, the declaration of the scope can be safely omitted.
5960// The MDNode of the scope can be omitted as well for the instructions that are
5961// part of this function. We do not do that at this point, as this might become
5962// too time consuming to do.
  // Scope (and scope-list) MDNodes seen on !alias.scope metadata.
5964  SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
  // Scope (and scope-list) MDNodes seen on !noalias metadata.
5965  SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5966
5967public:
  // Record the alias scopes referenced by one instruction's metadata.
5969    // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5970    if (!I->hasMetadataOtherThanDebugLoc())
5971      return;
5972
    // Insert both the scope-list node itself and each scope node it
    // contains into the given set.
5973    auto Track = [](Metadata *ScopeList, auto &Container) {
5974      const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5975      if (!MDScopeList || !Container.insert(MDScopeList).second)
5976        return;
5977      for (const auto &MDOperand : MDScopeList->operands())
5978        if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5979          Container.insert(MDScope);
5980    };
5981
5982    Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5983    Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5984  }
5985
  // Decide whether a noalias.scope.decl is dead given the scopes collected
  // so far.
5988    if (!Decl)
5989      return false;
5990
5991    assert(Decl->use_empty() &&
5992           "llvm.experimental.noalias.scope.decl in use ?");
5993    const MDNode *MDSL = Decl->getScopeList();
5994    assert(MDSL->getNumOperands() == 1 &&
5995           "llvm.experimental.noalias.scope should refer to a single scope");
5996    auto &MDOperand = MDSL->getOperand(0);
5997    if (auto *MD = dyn_cast<MDNode>(MDOperand))
      // The declaration is dead unless its scope appears in BOTH the
      // !alias.scope and the !noalias sets collected above.
5998      return !UsedAliasScopesAndLists.contains(MD) ||
5999             !UsedNoAliasScopesAndLists.contains(MD);
6000
6001    // Not an MDNode ? throw away.
6002    return true;
6003  }
6004};
6005
6006/// Populate the IC worklist from a function, by walking it in reverse
6007/// post-order and adding all reachable code to the worklist.
6008///
6009/// This has a couple of tricks to make the code faster and more powerful. In
6010/// particular, we constant fold and DCE instructions as we go, to avoid adding
6011/// them to the worklist (this significantly speeds up instcombine on code where
6012/// many instructions are dead or constant). Additionally, if we find a branch
6013/// whose condition is a known constant, we only visit the reachable successors.
6015  bool MadeIRChange = false;
6017  SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
6018  DenseMap<Constant *, Constant *> FoldedConstants;
6019  AliasScopeTracker SeenAliasScopes;
6020
  // Mark every edge out of BB except the one to LiveSucc as dead, and poison
  // the corresponding PHI inputs in the now-dead successors.
6021  auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
6022    for (BasicBlock *Succ : successors(BB))
6023      if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
6024        for (PHINode &PN : Succ->phis())
6025          for (Use &U : PN.incoming_values())
6026            if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
6027              U.set(PoisonValue::get(PN.getType()));
6028              MadeIRChange = true;
6029            }
6030  };
6031
6032  for (BasicBlock *BB : RPOT) {
    // A non-entry block whose predecessors all reach it over dead edges (or
    // are themselves dominated by it) is unreachable.
6033    if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
6034          return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
6035        })) {
6036      HandleOnlyLiveSuccessor(BB, nullptr);
6037      continue;
6038    }
6039    LiveBlocks.insert(BB);
6040
6041    for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
6042      // ConstantProp instruction if trivially constant.
6043      if (!Inst.use_empty() &&
6044          (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
6045        if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
6046          LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
6047                            << '\n');
6048          Inst.replaceAllUsesWith(C);
6049          ++NumConstProp;
6050          if (isInstructionTriviallyDead(&Inst, &TLI))
6051            Inst.eraseFromParent();
6052          MadeIRChange = true;
6053          continue;
6054        }
6055
6056      // See if we can constant fold its operands.
6057      for (Use &U : Inst.operands()) {
6059          continue;
6060
6061        auto *C = cast<Constant>(U);
        // Cache fold results so each distinct constant is folded only once.
6062        Constant *&FoldRes = FoldedConstants[C];
6063        if (!FoldRes)
6064          FoldRes = ConstantFoldConstant(C, DL, &TLI);
6065
6066        if (FoldRes != C) {
6067          LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
6068                            << "\n    Old = " << *C
6069                            << "\n    New = " << *FoldRes << '\n');
6070          U = FoldRes;
6071          MadeIRChange = true;
6072        }
6073      }
6074
6075      // Skip processing debug and pseudo intrinsics in InstCombine. Processing
6076      // these call instructions consumes non-trivial amount of time and
6077      // provides no value for the optimization.
6078      if (!Inst.isDebugOrPseudoInst()) {
6079        InstrsForInstructionWorklist.push_back(&Inst);
6080        SeenAliasScopes.analyse(&Inst);
6081      }
6082    }
6083
6084    // If this is a branch or switch on a constant, mark only the single
6085    // live successor. Otherwise assume all successors are live.
6086    Instruction *TI = BB->getTerminator();
6087    if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
6088      if (isa<UndefValue>(BI->getCondition())) {
6089        // Branch on undef is UB.
6090        HandleOnlyLiveSuccessor(BB, nullptr);
6091        continue;
6092      }
6093      if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
6094        bool CondVal = Cond->getZExtValue();
6095        HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
6096        continue;
6097      }
6098    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
6099      if (isa<UndefValue>(SI->getCondition())) {
6100        // Switch on undef is UB.
6101        HandleOnlyLiveSuccessor(BB, nullptr);
6102        continue;
6103      }
6104      if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
6105        HandleOnlyLiveSuccessor(BB,
6106                                SI->findCaseValue(Cond)->getCaseSuccessor());
6107        continue;
6108      }
6109    }
6110  }
6111
6112  // Remove instructions inside unreachable blocks. This prevents the
6113  // instcombine code from having to deal with some bad special cases, and
6114  // reduces use counts of instructions.
6115  for (BasicBlock &BB : F) {
6116    if (LiveBlocks.count(&BB))
6117      continue;
6118
6119    unsigned NumDeadInstInBB;
6120    NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
6121
6122    MadeIRChange |= NumDeadInstInBB != 0;
6123    NumDeadInst += NumDeadInstInBB;
6124  }
6125
6126  // Once we've found all of the instructions to add to instcombine's worklist,
6127  // add them in reverse order. This way instcombine will visit from the top
6128  // of the function down. This jives well with the way that it adds all uses
6129  // of instructions to the worklist after doing a transformation, thus avoiding
6130  // some N^2 behavior in pathological cases.
6131  Worklist.reserve(InstrsForInstructionWorklist.size());
6132  for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
6133    // DCE instruction if trivially dead. As we iterate in reverse program
6134    // order here, we will clean up whole chains of dead instructions.
6135    if (isInstructionTriviallyDead(Inst, &TLI) ||
6136        SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
6137      ++NumDeadInst;
6138      LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
6139      salvageDebugInfo(*Inst);
6140      Inst->eraseFromParent();
6141      MadeIRChange = true;
6142      continue;
6143    }
6144
6145    Worklist.push(Inst);
6146  }
6147
6148  return MadeIRChange;
6149}
6150
6152  // Collect backedges.
6154  for (BasicBlock *BB : RPOT) {
6155    Visited.insert(BB);
    // In reverse post-order, an edge to an already-visited block is
    // recorded as a backedge.
6156    for (BasicBlock *Succ : successors(BB))
6157      if (Visited.contains(Succ))
6158        BackEdges.insert({BB, Succ});
6159  }
  // Remember the result so it is computed at most once per run.
6160  ComputedBackEdges = true;
6161}
6162
6168                                const InstCombineOptions &Opts) {
6169  auto &DL = F.getDataLayout();
  // Fixpoint verification can be disabled per-function via attribute.
6170  bool VerifyFixpoint = Opts.VerifyFixpoint &&
6171                        !F.hasFnAttribute("instcombine-no-verify-fixpoint");
6172
6173  /// Builder - This is an IRBuilder that automatically inserts new
6174  /// instructions into the worklist when they are created.
6176      F.getContext(), TargetFolder(DL),
6177      IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
6178        Worklist.add(I);
6179        if (auto *Assume = dyn_cast<AssumeInst>(I))
6180          AC.registerAssumption(Assume);
6181      }));
6182
6184
6185  // Lower dbg.declare intrinsics otherwise their value may be clobbered
6186  // by instcombiner.
6187  bool MadeIRChange = false;
6189    MadeIRChange = LowerDbgDeclare(F);
6190
6191  // Iterate while there is work to do.
6192  unsigned Iteration = 0;
6193  while (true) {
6194    if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
6195      LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
6196                        << " on " << F.getName()
6197                        << " reached; stopping without verifying fixpoint\n");
6198      break;
6199    }
6200
6201    ++Iteration;
6202    ++NumWorklistIterations;
6203    LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
6204                      << F.getName() << "\n");
6205
    // A fresh combiner instance per iteration; each iteration re-seeds the
    // worklist and then drains it.
6206    InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI,
6207                        BPI, PSI, DL, RPOT);
6209    bool MadeChangeInThisIteration = IC.prepareWorklist(F);
6210    MadeChangeInThisIteration |= IC.run();
6211    if (!MadeChangeInThisIteration)
6212      break;
6213
6214    MadeIRChange = true;
6215    if (Iteration > Opts.MaxIterations) {
      // Still making changes past the iteration limit while fixpoint
      // verification is requested is a hard error.
6217          "Instruction Combining on " + Twine(F.getName()) +
6218          " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
6219          " iterations. " +
6220          "Use 'instcombine<no-verify-fixpoint>' or function attribute "
6221          "'instcombine-no-verify-fixpoint' to suppress this error.");
6222    }
6223  }
6224
  // Statistics: how many iterations were needed before reaching a fixpoint.
6225  if (Iteration == 1)
6226    ++NumOneIteration;
6227  else if (Iteration == 2)
6228    ++NumTwoIterations;
6229  else if (Iteration == 3)
6230    ++NumThreeIterations;
6231  else
6232    ++NumFourOrMoreIterations;
6233
6234  return MadeIRChange;
6235}
6236
6238
6240    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  // Print the base pass name, then the pass options in pipeline syntax, e.g.
  // instcombine<max-iterations=N;verify-fixpoint>.
6241  static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
6242      OS, MapClassName2PassName);
6243  OS << '<';
6244  OS << "max-iterations=" << Options.MaxIterations << ";";
6245  OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6246  OS << '>';
6247}
6248
// Unique pass identifier; its address is used (e.g. by LastRunTracking) to
// identify this pass.
6249char InstCombinePass::ID = 0;
6250
  // New pass manager entry point: gather the required analyses, run the
  // combiner, and report which analyses are preserved.
6253  auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6254  // No changes since last InstCombine pass, exit early.
6255  if (LRT.shouldSkip(&ID))
6256    return PreservedAnalyses::all();
6257
6258  auto &AC = AM.getResult<AssumptionAnalysis>(F);
6259  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6260  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
6262  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6263
6264  auto *AA = &AM.getResult<AAManager>(F);
  // Profile information is optional: BFI is only computed when a profile
  // summary is available for the module.
6265  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6266  ProfileSummaryInfo *PSI =
6267      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6268  auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6269      &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6271
6272  if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6273                                       BFI, BPI, PSI, Options)) {
6274    // No changes, all analyses are preserved.
6275    LRT.update(&ID, /*Changed=*/false);
6276    return PreservedAnalyses::all();
6277  }
6278
6279  // Mark all the analyses that instcombine updates as preserved.
6281  LRT.update(&ID, /*Changed=*/true);
6284  return PA;
6285}
6286
6302
  // Legacy pass manager entry point; mirrors InstCombinePass::run but pulls
  // analyses from the legacy wrapper passes.
6304  if (skipFunction(F))
6305    return false;
6306
6307  // Required analyses.
6308  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6309  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6310  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6312  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6314
6315  // Optional analyses.
6316  ProfileSummaryInfo *PSI =
6318  BlockFrequencyInfo *BFI =
6319      (PSI && PSI->hasProfileSummary()) ?
6321      nullptr;
6322  BranchProbabilityInfo *BPI = nullptr;
6323  if (auto *WrapperPass =
6325    BPI = &WrapperPass->getBPI();
6326
  // Legacy runs always use default options.
6327  return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6328                                         BFI, BPI, PSI, InstCombineOptions());
}
6330
6332
6334
6336 "Combine redundant instructions", false, false)
6347 "Combine redundant instructions", false, false)
6348
6349// Initialization Routines.
6353
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
DXIL Resource Access
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "(X ROp Y) LOp Z" is always equal to "(X LOp Z) ROp (Y LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static Constant * constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector, Constant *Splat, bool SplatLHS, const DataLayout &DL)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static Instruction * foldSpliceBinOp(BinaryOperator &Inst, InstCombiner::BuilderTy &Builder)
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
unsigned OpIndex
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
The Input class is used to parse a yaml document into in-memory structs and vectors.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition APFloat.cpp:214
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1769
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition APInt.cpp:1901
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1939
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1971
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1952
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff it does not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:219
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:491
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
Definition Constants.h:438
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:781
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
Definition Constants.h:522
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:321
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:809
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2025
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:537
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth=0)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:64
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition Constants.h:1493
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:53
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:106
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:254
op_range operands()
Definition User.h:267
op_iterator op_begin()
Definition User.h:259
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:119
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:761
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:440
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
iterator_range< user_iterator > users()
Definition Value.h:427
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:345
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:713
bool use_empty() const
Definition Value.h:347
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:893
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
Splat_match< T > m_ConstantSplat(const T &SubPattern)
Match a constant splat. TODO: Extend this to non-constant splats.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
SelectLike_match< CondTy, LTy, RTy > m_SelectLike(const CondTy &C, const LTy &TrueC, const RTy &FalseC)
Matches a value that behaves like a boolean-controlled select, i.e.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_VectorInsert(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2116
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
cl::opt< bool > ProfcheckDisableMetadataFixes
Definition Metadata.cpp:64
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2503
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:257
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1726
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1622
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2486
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:406
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1813
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1680
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2055
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2432
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:323
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Variant of isSafeToSpeculativelyExecute that does not use information from the instruction's non-constant operands.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2019
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2146
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:267
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:264
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const