// Debug tag consumed by LLVM_DEBUG / -debug-only=dse for this pass's output.
95#define DEBUG_TYPE "dse"
// Pass statistics (emitted with -stats). Each STATISTIC declares a named
// counter; the pass bumps these as it deletes, shortens, or merges stores.
97STATISTIC(NumRemainingStores,
"Number of stores remaining after DSE");
98STATISTIC(NumRedundantStores,
"Number of redundant stores deleted");
99STATISTIC(NumFastStores,
"Number of stores deleted");
100STATISTIC(NumFastOther,
"Number of other instrs removed");
// Stores proven dead because later partial overwrites jointly cover them.
101STATISTIC(NumCompletePartials,
"Number of stores dead by later partials");
// Stores rewritten in place (e.g. shortened memintrinsics, merged values)
// rather than deleted outright.
102STATISTIC(NumModifiedStores,
"Number of stores modified");
107 "Number of times a valid candidate is returned from getDomMemoryDef");
109 "Number iterations check for reads in getDomMemoryDef");
112 "Controls which MemoryDefs are eliminated.");
117 cl::desc(
"Enable partial-overwrite tracking in DSE"));
122 cl::desc(
"Enable partial store merging in DSE"));
126 cl::desc(
"The number of memory instructions to scan for "
127 "dead store elimination (default = 150)"));
130 cl::desc(
"The maximum number of steps while walking upwards to find "
131 "MemoryDefs that may be killed (default = 90)"));
135 cl::desc(
"The maximum number candidates that only partially overwrite the "
136 "killing MemoryDef to consider"
141 cl::desc(
"The number of MemoryDefs we consider as candidates to eliminated "
142 "other stores per basic block (default = 5000)"));
147 "The cost of a step in the same basic block as the killing MemoryDef"
153 cl::desc(
"The cost of a step in a different basic "
154 "block than the killing MemoryDef"
159 cl::desc(
"The maximum number of blocks to check when trying to prove that "
160 "all paths to an exit go through a killing block (default = 50)"));
170 cl::desc(
"Allow DSE to optimize memory accesses."));
175 cl::desc(
"Enable the initializes attr improvement in DSE"));
191 switch (
II->getIntrinsicID()) {
192 default:
return false;
193 case Intrinsic::memset:
194 case Intrinsic::memcpy:
195 case Intrinsic::memcpy_element_unordered_atomic:
196 case Intrinsic::memset_element_unordered_atomic:
231enum OverwriteResult {
235 OW_PartialEarlierWithFullLater,
251 if (KillingII ==
nullptr || DeadII ==
nullptr)
253 if (KillingII->getIntrinsicID() != DeadII->getIntrinsicID())
256 switch (KillingII->getIntrinsicID()) {
257 case Intrinsic::masked_store:
258 case Intrinsic::vp_store: {
260 auto *KillingTy = KillingII->getArgOperand(0)->getType();
261 auto *DeadTy = DeadII->getArgOperand(0)->getType();
262 if (
DL.getTypeSizeInBits(KillingTy) !=
DL.getTypeSizeInBits(DeadTy))
269 Value *KillingPtr = KillingII->getArgOperand(1);
270 Value *DeadPtr = DeadII->getArgOperand(1);
271 if (KillingPtr != DeadPtr && !
AA.isMustAlias(KillingPtr, DeadPtr))
273 if (KillingII->getIntrinsicID() == Intrinsic::masked_store) {
276 if (KillingII->getArgOperand(2) != DeadII->getArgOperand(2))
278 }
else if (KillingII->getIntrinsicID() == Intrinsic::vp_store) {
281 if (KillingII->getArgOperand(2) != DeadII->getArgOperand(2))
284 if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
306 int64_t KillingOff, int64_t DeadOff,
317 KillingOff < int64_t(DeadOff + DeadSize) &&
318 int64_t(KillingOff + KillingSize) >= DeadOff) {
321 auto &IM = IOL[DeadI];
322 LLVM_DEBUG(
dbgs() <<
"DSE: Partial overwrite: DeadLoc [" << DeadOff <<
", "
323 << int64_t(DeadOff + DeadSize) <<
") KillingLoc ["
324 << KillingOff <<
", " << int64_t(KillingOff + KillingSize)
331 int64_t KillingIntStart = KillingOff;
332 int64_t KillingIntEnd = KillingOff + KillingSize;
336 auto ILI = IM.lower_bound(KillingIntStart);
337 if (ILI != IM.end() && ILI->second <= KillingIntEnd) {
341 KillingIntStart = std::min(KillingIntStart, ILI->second);
342 KillingIntEnd = std::max(KillingIntEnd, ILI->first);
351 while (ILI != IM.end() && ILI->second <= KillingIntEnd) {
352 assert(ILI->second > KillingIntStart &&
"Unexpected interval");
353 KillingIntEnd = std::max(KillingIntEnd, ILI->first);
358 IM[KillingIntEnd] = KillingIntStart;
361 if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) {
362 LLVM_DEBUG(
dbgs() <<
"DSE: Full overwrite from partials: DeadLoc ["
363 << DeadOff <<
", " << int64_t(DeadOff + DeadSize)
364 <<
") Composite KillingLoc [" << ILI->second <<
", "
365 << ILI->first <<
")\n");
366 ++NumCompletePartials;
374 int64_t(DeadOff + DeadSize) > KillingOff &&
375 uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) {
376 LLVM_DEBUG(
dbgs() <<
"DSE: Partial overwrite a dead load [" << DeadOff
377 <<
", " << int64_t(DeadOff + DeadSize)
378 <<
") by a killing store [" << KillingOff <<
", "
379 << int64_t(KillingOff + KillingSize) <<
")\n");
381 return OW_PartialEarlierWithFullLater;
394 (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) &&
395 int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize)))
408 (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) {
409 assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
410 "Expect to be handled as OW_Complete");
// Pairs a basic block with the PHI-translated form of the queried pointer
// that is valid in that block. NOTE(review): appears to be a local alias for
// the worklist of a backwards CFG walk (see the PHITransAddr use below) --
// the enclosing function's header is outside this view; confirm scope.
430 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
447 auto *MemLocPtr =
const_cast<Value *
>(MemLoc.
Ptr);
452 bool isFirstBlock =
true;
455 while (!WorkList.
empty()) {
467 assert(
B == SecondBB &&
"first block is not the store block");
469 isFirstBlock =
false;
475 for (; BI != EI; ++BI) {
477 if (
I->mayWriteToMemory() &&
I != SecondI)
483 "Should not hit the entry block because SI must be dominated by LI");
493 auto Inserted = Visited.
insert(std::make_pair(Pred, TranslatedPtr));
494 if (!Inserted.second) {
497 if (TranslatedPtr != Inserted.first->second)
502 WorkList.
push_back(std::make_pair(Pred, PredAddr));
511 uint64_t NewSizeInBits,
bool IsOverwriteEnd) {
513 uint64_t DeadSliceSizeInBits = OldSizeInBits - NewSizeInBits;
515 OldOffsetInBits + (IsOverwriteEnd ? NewSizeInBits : 0);
516 auto SetDeadFragExpr = [](
auto *Assign,
520 uint64_t RelativeOffset = DeadFragment.OffsetInBits -
521 Assign->getExpression()
526 Assign->getExpression(), RelativeOffset, DeadFragment.SizeInBits)) {
527 Assign->setExpression(*
NewExpr);
534 DeadFragment.SizeInBits);
535 Assign->setExpression(Expr);
536 Assign->setKillLocation();
543 auto GetDeadLink = [&Ctx, &LinkToNothing]() {
546 return LinkToNothing;
552 std::optional<DIExpression::FragmentInfo> NewFragment;
554 DeadSliceSizeInBits, Assign,
559 Assign->setKillAddress();
560 Assign->setAssignId(GetDeadLink());
564 if (NewFragment->SizeInBits == 0)
568 auto *NewAssign =
static_cast<decltype(Assign)
>(Assign->clone());
569 NewAssign->insertAfter(Assign->getIterator());
570 NewAssign->setAssignId(GetDeadLink());
572 SetDeadFragExpr(NewAssign, *NewFragment);
573 NewAssign->setKillAddress();
587 for (
auto &Attr : OldAttrs) {
588 if (Attr.hasKindAsEnum()) {
589 switch (Attr.getKindAsEnum()) {
592 case Attribute::Alignment:
594 if (
isAligned(Attr.getAlignment().valueOrOne(), PtrOffset))
597 case Attribute::Dereferenceable:
598 case Attribute::DereferenceableOrNull:
602 case Attribute::NonNull:
603 case Attribute::NoUndef:
611 Intrinsic->removeParamAttrs(ArgNo, AttrsToRemove);
615 uint64_t &DeadSize, int64_t KillingStart,
616 uint64_t KillingSize,
bool IsOverwriteEnd) {
618 Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
634 int64_t ToRemoveStart = 0;
638 if (IsOverwriteEnd) {
643 ToRemoveStart = KillingStart + Off;
644 if (DeadSize <=
uint64_t(ToRemoveStart - DeadStart))
646 ToRemoveSize = DeadSize -
uint64_t(ToRemoveStart - DeadStart);
648 ToRemoveStart = DeadStart;
650 "Not overlapping accesses?");
651 ToRemoveSize = KillingSize -
uint64_t(DeadStart - KillingStart);
656 if (ToRemoveSize <= (PrefAlign.
value() - Off))
658 ToRemoveSize -= PrefAlign.
value() - Off;
661 "Should preserve selected alignment");
664 assert(ToRemoveSize > 0 &&
"Shouldn't reach here if nothing to remove");
665 assert(DeadSize > ToRemoveSize &&
"Can't remove more than original size");
667 uint64_t NewSize = DeadSize - ToRemoveSize;
668 if (DeadIntrinsic->isAtomic()) {
671 const uint32_t ElementSize = DeadIntrinsic->getElementSizeInBytes();
672 if (0 != NewSize % ElementSize)
677 << (IsOverwriteEnd ?
"END" :
"BEGIN") <<
": " << *DeadI
678 <<
"\n KILLER [" << ToRemoveStart <<
", "
679 << int64_t(ToRemoveStart + ToRemoveSize) <<
")\n");
681 DeadIntrinsic->setLength(NewSize);
682 DeadIntrinsic->setDestAlignment(PrefAlign);
684 Value *OrigDest = DeadIntrinsic->getRawDest();
685 if (!IsOverwriteEnd) {
686 Value *Indices[1] = {
687 ConstantInt::get(DeadIntrinsic->getLength()->getType(), ToRemoveSize)};
691 NewDestGEP->
setDebugLoc(DeadIntrinsic->getDebugLoc());
692 DeadIntrinsic->setDest(NewDestGEP);
702 DeadStart += ToRemoveSize;
709 int64_t &DeadStart,
uint64_t &DeadSize) {
714 int64_t KillingStart = OII->second;
715 uint64_t KillingSize = OII->first - KillingStart;
717 assert(OII->first - KillingStart >= 0 &&
"Size expected to be positive");
719 if (KillingStart > DeadStart &&
722 (
uint64_t)(KillingStart - DeadStart) < DeadSize &&
725 KillingSize >= DeadSize - (
uint64_t)(KillingStart - DeadStart)) {
726 if (
tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
737 int64_t &DeadStart,
uint64_t &DeadSize) {
742 int64_t KillingStart = OII->second;
743 uint64_t KillingSize = OII->first - KillingStart;
745 assert(OII->first - KillingStart >= 0 &&
"Size expected to be positive");
747 if (KillingStart <= DeadStart &&
750 KillingSize > (
uint64_t)(DeadStart - KillingStart)) {
753 assert(KillingSize - (
uint64_t)(DeadStart - KillingStart) < DeadSize &&
754 "Should have been handled as OW_Complete");
755 if (
tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
766 int64_t KillingOffset, int64_t DeadOffset,
793 unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
794 unsigned LShiftAmount =
795 DL.isBigEndian() ? DeadValue.
getBitWidth() - BitOffsetDiff - KillingBits
798 LShiftAmount + KillingBits);
801 APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount);
803 <<
"\n Killing: " << *KillingI
804 <<
"\n Merged Value: " << Merged <<
'\n');
813 switch (
II->getIntrinsicID()) {
814 case Intrinsic::lifetime_start:
815 case Intrinsic::lifetime_end:
816 case Intrinsic::invariant_end:
817 case Intrinsic::launder_invariant_group:
818 case Intrinsic::assume:
820 case Intrinsic::dbg_declare:
821 case Intrinsic::dbg_label:
822 case Intrinsic::dbg_value:
837 if (CB->onlyAccessesInaccessibleMemory())
842 if (DI->
mayThrow() && !DefVisibleToCaller)
864struct MemoryLocationWrapper {
865 MemoryLocationWrapper(MemoryLocation MemLoc, MemoryDef *MemDef,
866 bool DefByInitializesAttr)
867 : MemLoc(MemLoc), MemDef(MemDef),
868 DefByInitializesAttr(DefByInitializesAttr) {
869 assert(MemLoc.Ptr &&
"MemLoc should be not null");
871 DefInst = MemDef->getMemoryInst();
874 MemoryLocation MemLoc;
875 const Value *UnderlyingObject;
878 bool DefByInitializesAttr =
false;
883struct MemoryDefWrapper {
884 MemoryDefWrapper(MemoryDef *MemDef,
885 ArrayRef<std::pair<MemoryLocation, bool>> MemLocations) {
887 for (
auto &[MemLoc, DefByInitializesAttr] : MemLocations)
888 DefinedLocations.push_back(
889 MemoryLocationWrapper(MemLoc, MemDef, DefByInitializesAttr));
895struct ArgumentInitInfo {
897 bool IsDeadOrInvisibleOnUnwind;
898 ConstantRangeList Inits;
913 bool CallHasNoUnwindAttr) {
919 for (
const auto &Arg : Args) {
920 if (!CallHasNoUnwindAttr && !Arg.IsDeadOrInvisibleOnUnwind)
922 if (Arg.Inits.empty())
927 for (
auto &Arg : Args.drop_front())
928 IntersectedIntervals = IntersectedIntervals.
intersectWith(Arg.Inits);
930 return IntersectedIntervals;
938 EarliestEscapeAnalysis EA;
947 BatchAAResults BatchAA;
951 PostDominatorTree &PDT;
952 const TargetLibraryInfo &TLI;
953 const DataLayout &DL;
958 bool ContainsIrreducibleLoops;
963 SmallPtrSet<MemoryAccess *, 4> SkipStores;
965 DenseMap<const Value *, bool> CapturedBeforeReturn;
968 DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
969 DenseMap<const Value *, uint64_t> InvisibleToCallerAfterRetBounded;
971 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
974 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
978 MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs;
982 bool AnyUnreachableExit;
987 bool ShouldIterateEndOfFunctionDSE;
994 PostDominatorTree &PDT,
const TargetLibraryInfo &TLI,
996 DSEState(
const DSEState &) =
delete;
997 DSEState &operator=(
const DSEState &) =
delete;
999 LocationSize strengthenLocationSize(
const Instruction *
I,
1000 LocationSize
Size)
const;
1010 OverwriteResult isOverwrite(
const Instruction *KillingI,
1011 const Instruction *DeadI,
1012 const MemoryLocation &KillingLoc,
1013 const MemoryLocation &DeadLoc,
1014 int64_t &KillingOff, int64_t &DeadOff);
1016 bool isInvisibleToCallerAfterRet(
const Value *V,
const Value *Ptr,
1017 const LocationSize StoreSize);
1019 bool isInvisibleToCallerOnUnwind(
const Value *V);
1021 std::optional<MemoryLocation> getLocForWrite(Instruction *
I)
const;
1026 getLocForInst(Instruction *
I,
bool ConsiderInitializesAttr);
1030 bool isRemovable(Instruction *
I);
1034 bool isCompleteOverwrite(
const MemoryLocation &DefLoc, Instruction *DefInst,
1035 Instruction *UseInst);
1038 bool isWriteAtEndOfFunction(MemoryDef *Def,
const MemoryLocation &DefLoc);
1043 std::optional<std::pair<MemoryLocation, bool>>
1044 getLocForTerminator(Instruction *
I)
const;
1048 bool isMemTerminatorInst(Instruction *
I)
const;
1052 bool isMemTerminator(
const MemoryLocation &Loc, Instruction *AccessI,
1053 Instruction *MaybeTerm);
1056 bool isReadClobber(
const MemoryLocation &DefLoc, Instruction *UseInst);
1063 bool isGuaranteedLoopIndependent(
const Instruction *Current,
1064 const Instruction *KillingDef,
1065 const MemoryLocation &CurrentLoc);
1070 bool isGuaranteedLoopInvariant(
const Value *Ptr);
1078 std::optional<MemoryAccess *>
1079 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1080 const MemoryLocation &KillingLoc,
const Value *KillingUndObj,
1081 unsigned &ScanLimit,
unsigned &WalkerStepLimit,
1082 bool IsMemTerm,
unsigned &PartialLimit,
1083 bool IsInitializesAttrMemLoc);
1089 SmallPtrSetImpl<MemoryAccess *> *
Deleted =
nullptr);
1095 bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
1096 const Value *KillingUndObj);
1103 bool isDSEBarrier(
const Value *KillingUndObj, Instruction *DeadI);
1107 bool eliminateDeadWritesAtEndOfFunction();
1111 bool tryFoldIntoCalloc(MemoryDef *Def,
const Value *DefUO);
1115 bool dominatingConditionImpliesValue(MemoryDef *Def);
1119 bool storeIsNoop(MemoryDef *Def,
const Value *DefUO);
1125 bool eliminateRedundantStoresOfExistingValues();
1140 std::pair<bool, bool>
1141 eliminateDeadDefs(
const MemoryLocationWrapper &KillingLocWrapper);
1145 bool eliminateDeadDefs(
const MemoryDefWrapper &KillingDefWrapper);
1155 if (Visited.
insert(MA).second)
1172 :
F(
F),
AA(
AA), EA(DT, &LI), BatchAA(
AA, &EA), MSSA(MSSA), DT(DT), PDT(PDT),
1173 TLI(TLI),
DL(
F.getDataLayout()), LI(LI) {
1178 PostOrderNumbers[BB] = PO++;
1181 if (
I.mayThrow() && !MA)
1182 ThrowingBlocks.insert(
I.getParent());
1186 (getLocForWrite(&
I) || isMemTerminatorInst(&
I) ||
1188 MemDefs.push_back(MD);
1195 if (AI.hasPassPointeeByValueCopyAttr()) {
1196 InvisibleToCallerAfterRet.insert({&AI, true});
1200 if (!AI.getType()->isPointerTy())
1204 if (Info.coversAllReachableMemory())
1205 InvisibleToCallerAfterRet.insert({&AI, true});
1206 else if (
uint64_t DeadBytes = Info.getNumberOfDeadBytes())
1207 InvisibleToCallerAfterRetBounded.insert({&AI, DeadBytes});
1214 return isa<UnreachableInst>(E->getTerminator());
1223 (
F == LibFunc_memset_chk ||
F == LibFunc_memcpy_chk)) {
1239OverwriteResult DSEState::isOverwrite(
const Instruction *KillingI,
1240 const Instruction *DeadI,
1241 const MemoryLocation &KillingLoc,
1242 const MemoryLocation &DeadLoc,
1243 int64_t &KillingOff, int64_t &DeadOff) {
1247 if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
1250 LocationSize KillingLocSize =
1251 strengthenLocationSize(KillingI, KillingLoc.
Size);
1259 if (DeadUndObj == KillingUndObj && KillingLocSize.
isPrecise() &&
1261 std::optional<TypeSize> KillingUndObjSize =
1263 if (KillingUndObjSize && *KillingUndObjSize == KillingLocSize.
getValue())
1274 if (KillingMemI && DeadMemI) {
1275 const Value *KillingV = KillingMemI->getLength();
1276 const Value *DeadV = DeadMemI->getLength();
1277 if (KillingV == DeadV && BatchAA.
isMustAlias(DeadLoc, KillingLoc))
1286 const TypeSize KillingSize = KillingLocSize.
getValue();
1295 AliasResult AAR = BatchAA.
alias(KillingLoc, DeadLoc);
1301 if (KillingSize >= DeadSize)
1308 if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
1314 if (DeadUndObj != KillingUndObj) {
1330 const Value *DeadBasePtr =
1332 const Value *KillingBasePtr =
1337 if (DeadBasePtr != KillingBasePtr)
1355 if (DeadOff >= KillingOff) {
1358 if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
1362 else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
1363 return OW_MaybePartial;
1367 else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
1368 return OW_MaybePartial;
1375bool DSEState::isInvisibleToCallerAfterRet(
const Value *V,
const Value *Ptr,
1376 const LocationSize StoreSize) {
1380 auto IBounded = InvisibleToCallerAfterRetBounded.find(V);
1381 if (IBounded != InvisibleToCallerAfterRetBounded.end()) {
1382 int64_t ValueOffset;
1383 [[maybe_unused]]
const Value *BaseValue =
1393 ValueOffset + StoreSize.
getValue() <= IBounded->second &&
1397 auto I = InvisibleToCallerAfterRet.insert({
V,
false});
1398 if (
I.second && isInvisibleToCallerOnUnwind(V) &&
isNoAliasCall(V))
1400 V,
true, CaptureComponents::Provenance));
1401 return I.first->second;
1404bool DSEState::isInvisibleToCallerOnUnwind(
const Value *V) {
1405 bool RequiresNoCaptureBeforeUnwind;
1408 if (!RequiresNoCaptureBeforeUnwind)
1411 auto I = CapturedBeforeReturn.insert({
V,
true});
1418 V,
false, CaptureComponents::Provenance));
1419 return !
I.first->second;
1422std::optional<MemoryLocation> DSEState::getLocForWrite(Instruction *
I)
const {
1423 if (!
I->mayWriteToMemory())
1424 return std::nullopt;
1433DSEState::getLocForInst(Instruction *
I,
bool ConsiderInitializesAttr) {
1435 if (isMemTerminatorInst(
I)) {
1436 if (
auto Loc = getLocForTerminator(
I))
1437 Locations.push_back(std::make_pair(Loc->first,
false));
1441 if (
auto Loc = getLocForWrite(
I))
1442 Locations.push_back(std::make_pair(*Loc,
false));
1444 if (ConsiderInitializesAttr) {
1445 for (
auto &MemLoc : getInitializesArgMemLoc(
I)) {
1446 Locations.push_back(std::make_pair(MemLoc,
true));
1452bool DSEState::isRemovable(Instruction *
I) {
1453 assert(getLocForWrite(
I) &&
"Must have analyzable write");
1457 return SI->isUnordered();
1462 return !
MI->isVolatile();
1466 if (CB->isLifetimeStartOrEnd())
1469 return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() &&
1470 !CB->isTerminator();
1476bool DSEState::isCompleteOverwrite(
const MemoryLocation &DefLoc,
1477 Instruction *DefInst, Instruction *UseInst) {
1485 if (CB->onlyAccessesInaccessibleMemory())
1488 int64_t InstWriteOffset, DepWriteOffset;
1489 if (
auto CC = getLocForWrite(UseInst))
1490 return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
1491 DepWriteOffset) == OW_Complete;
1495bool DSEState::isWriteAtEndOfFunction(MemoryDef *Def,
1496 const MemoryLocation &DefLoc) {
1498 << *
Def->getMemoryInst()
1499 <<
") is at the end the function \n");
1501 SmallPtrSet<MemoryAccess *, 8> Visited;
1504 for (
unsigned I = 0;
I < WorkList.
size();
I++) {
1510 MemoryAccess *UseAccess = WorkList[
I];
1515 if (!isGuaranteedLoopInvariant(DefLoc.
Ptr))
1524 if (isReadClobber(DefLoc, UseInst)) {
1525 LLVM_DEBUG(
dbgs() <<
" ... hit read clobber " << *UseInst <<
".\n");
1535std::optional<std::pair<MemoryLocation, bool>>
1536DSEState::getLocForTerminator(Instruction *
I)
const {
1538 if (CB->getIntrinsicID() == Intrinsic::lifetime_end)
1545 return std::nullopt;
1548bool DSEState::isMemTerminatorInst(Instruction *
I)
const {
1550 return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end ||
1554bool DSEState::isMemTerminator(
const MemoryLocation &Loc, Instruction *AccessI,
1555 Instruction *MaybeTerm) {
1556 std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1557 getLocForTerminator(MaybeTerm);
1568 auto TermLoc = MaybeTermLoc->first;
1569 if (MaybeTermLoc->second) {
1573 int64_t InstWriteOffset = 0;
1574 int64_t DepWriteOffset = 0;
1575 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
1576 DepWriteOffset) == OW_Complete;
1579bool DSEState::isReadClobber(
const MemoryLocation &DefLoc,
1580 Instruction *UseInst) {
1593 if (CB->onlyAccessesInaccessibleMemory())
1599bool DSEState::isGuaranteedLoopIndependent(
const Instruction *Current,
1600 const Instruction *KillingDef,
1601 const MemoryLocation &CurrentLoc) {
1609 if (!ContainsIrreducibleLoops && CurrentLI &&
1613 return isGuaranteedLoopInvariant(CurrentLoc.
Ptr);
1616bool DSEState::isGuaranteedLoopInvariant(
const Value *Ptr) {
1619 if (
GEP->hasAllConstantIndices())
1623 return I->getParent()->isEntryBlock() ||
1624 (!ContainsIrreducibleLoops && !LI.
getLoopFor(
I->getParent()));
1629std::optional<MemoryAccess *> DSEState::getDomMemoryDef(
1630 MemoryDef *KillingDef, MemoryAccess *StartAccess,
1631 const MemoryLocation &KillingLoc,
const Value *KillingUndObj,
1632 unsigned &ScanLimit,
unsigned &WalkerStepLimit,
bool IsMemTerm,
1633 unsigned &PartialLimit,
bool IsInitializesAttrMemLoc) {
1634 if (ScanLimit == 0 || WalkerStepLimit == 0) {
1636 return std::nullopt;
1639 MemoryAccess *Current = StartAccess;
1653 std::optional<MemoryLocation> CurrentLoc;
1656 dbgs() <<
" visiting " << *Current;
1669 return std::nullopt;
1677 if (WalkerStepLimit <= StepCost) {
1679 return std::nullopt;
1681 WalkerStepLimit -= StepCost;
1695 if (
canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) {
1696 CanOptimize =
false;
1702 if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
1704 return std::nullopt;
1709 if (isDSEBarrier(KillingUndObj, CurrentI)) {
1711 return std::nullopt;
1719 return std::nullopt;
1722 if (
any_of(Current->
uses(), [
this, &KillingLoc, StartAccess](Use &U) {
1723 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1724 return !MSSA.dominates(StartAccess, UseOrDef) &&
1725 isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
1729 return std::nullopt;
1734 CurrentLoc = getLocForWrite(CurrentI);
1735 if (!CurrentLoc || !isRemovable(CurrentI)) {
1736 CanOptimize =
false;
1743 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
1745 CanOptimize =
false;
1753 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
1754 CanOptimize =
false;
1758 int64_t KillingOffset = 0;
1759 int64_t DeadOffset = 0;
1760 auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
1761 KillingOffset, DeadOffset);
1767 (OR == OW_Complete || OR == OW_MaybePartial))
1773 CanOptimize =
false;
1778 if (OR == OW_Unknown || OR == OW_None)
1780 else if (OR == OW_MaybePartial) {
1785 if (PartialLimit <= 1) {
1786 WalkerStepLimit -= 1;
1787 LLVM_DEBUG(
dbgs() <<
" ... reached partial limit ... continue with "
1801 SmallPtrSet<Instruction *, 16> KillingDefs;
1803 MemoryAccess *MaybeDeadAccess = Current;
1804 MemoryLocation MaybeDeadLoc = *CurrentLoc;
1806 LLVM_DEBUG(
dbgs() <<
" Checking for reads of " << *MaybeDeadAccess <<
" ("
1807 << *MaybeDeadI <<
")\n");
1810 SmallPtrSet<MemoryAccess *, 32> Visited;
1814 for (
unsigned I = 0;
I < WorkList.
size();
I++) {
1815 MemoryAccess *UseAccess = WorkList[
I];
1819 if (ScanLimit < (WorkList.
size() -
I)) {
1821 return std::nullopt;
1824 NumDomMemDefChecks++;
1827 if (
any_of(KillingDefs, [
this, UseAccess](Instruction *KI) {
1830 LLVM_DEBUG(
dbgs() <<
" ... skipping, dominated by killing block\n");
1841 if (
any_of(KillingDefs, [
this, UseInst](Instruction *KI) {
1844 LLVM_DEBUG(
dbgs() <<
" ... skipping, dominated by killing def\n");
1850 if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1853 <<
" ... skipping, memterminator invalidates following accesses\n");
1863 if (UseInst->
mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
1865 return std::nullopt;
1872 bool IsKillingDefFromInitAttr =
false;
1873 if (IsInitializesAttrMemLoc) {
1874 if (KillingI == UseInst &&
1876 IsKillingDefFromInitAttr =
true;
1879 if (isReadClobber(MaybeDeadLoc, UseInst) && !IsKillingDefFromInitAttr) {
1881 return std::nullopt;
1887 if (MaybeDeadAccess == UseAccess &&
1888 !isGuaranteedLoopInvariant(MaybeDeadLoc.
Ptr)) {
1889 LLVM_DEBUG(
dbgs() <<
" ... found not loop invariant self access\n");
1890 return std::nullopt;
1896 if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
1912 if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1914 if (PostOrderNumbers.
find(MaybeKillingBlock)->second <
1915 PostOrderNumbers.
find(MaybeDeadAccess->
getBlock())->second) {
1916 if (!isInvisibleToCallerAfterRet(KillingUndObj, KillingLoc.
Ptr,
1919 <<
" ... found killing def " << *UseInst <<
"\n");
1920 KillingDefs.
insert(UseInst);
1924 <<
" ... found preceeding def " << *UseInst <<
"\n");
1925 return std::nullopt;
1935 if (!isInvisibleToCallerAfterRet(KillingUndObj, KillingLoc.
Ptr,
1937 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1938 for (Instruction *KD : KillingDefs)
1939 KillingBlocks.
insert(KD->getParent());
1941 "Expected at least a single killing block");
1955 if (!AnyUnreachableExit)
1956 return std::nullopt;
1960 CommonPred =
nullptr;
1964 if (KillingBlocks.
count(CommonPred))
1965 return {MaybeDeadAccess};
1967 SetVector<BasicBlock *> WorkList;
1971 WorkList.
insert(CommonPred);
1973 for (BasicBlock *R : PDT.
roots()) {
1981 for (
unsigned I = 0;
I < WorkList.
size();
I++) {
1984 if (KillingBlocks.
count(Current))
1986 if (Current == MaybeDeadAccess->
getBlock())
1987 return std::nullopt;
1997 return std::nullopt;
2004 return {MaybeDeadAccess};
2007void DSEState::deleteDeadInstruction(Instruction *SI,
2008 SmallPtrSetImpl<MemoryAccess *> *
Deleted) {
2009 MemorySSAUpdater Updater(&MSSA);
2014 while (!NowDeadInsts.
empty()) {
2028 SkipStores.insert(MD);
2032 if (
SI->getValueOperand()->getType()->isPointerTy()) {
2034 if (CapturedBeforeReturn.erase(UO))
2035 ShouldIterateEndOfFunctionDSE =
true;
2036 InvisibleToCallerAfterRet.erase(UO);
2037 InvisibleToCallerAfterRetBounded.erase(UO);
2042 Updater.removeMemoryAccess(MA);
2046 if (
I != IOLs.end())
2047 I->second.erase(DeadInst);
2049 for (Use &O : DeadInst->
operands())
2069bool DSEState::mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
2070 const Value *KillingUndObj) {
2074 if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
2078 return ThrowingBlocks.count(KillingI->
getParent());
2079 return !ThrowingBlocks.empty();
2082bool DSEState::isDSEBarrier(
const Value *KillingUndObj, Instruction *DeadI) {
2085 if (DeadI->
mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
2105bool DSEState::eliminateDeadWritesAtEndOfFunction() {
2106 bool MadeChange =
false;
2108 dbgs() <<
"Trying to eliminate MemoryDefs at the end of the function\n");
2110 ShouldIterateEndOfFunctionDSE =
false;
2112 if (SkipStores.contains(Def))
2116 auto DefLoc = getLocForWrite(DefI);
2117 if (!DefLoc || !isRemovable(DefI)) {
2119 "instruction not removable.\n");
2129 if (!isInvisibleToCallerAfterRet(UO, DefLoc->
Ptr, DefLoc->
Size))
2132 if (isWriteAtEndOfFunction(Def, *DefLoc)) {
2134 LLVM_DEBUG(
dbgs() <<
" ... MemoryDef is not accessed until the end "
2135 "of the function\n");
2141 }
while (ShouldIterateEndOfFunctionDSE);
2145bool DSEState::tryFoldIntoCalloc(MemoryDef *Def,
const Value *DefUO) {
2152 if (!StoredConstant || !StoredConstant->
isNullValue())
2155 if (!isRemovable(DefI))
2159 if (
F.hasFnAttribute(Attribute::SanitizeMemory) ||
2160 F.hasFnAttribute(Attribute::SanitizeAddress) ||
2161 F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
F.getName() ==
"calloc")
2166 auto *InnerCallee =
Malloc->getCalledFunction();
2169 LibFunc
Func = NotLibFunc;
2170 StringRef ZeroedVariantName;
2171 if (!TLI.
getLibFunc(*InnerCallee, Func) || !TLI.
has(Func) ||
2172 Func != LibFunc_malloc) {
2177 if (ZeroedVariantName.
empty())
2186 auto shouldCreateCalloc = [](CallInst *
Malloc, CallInst *Memset) {
2189 auto *MallocBB =
Malloc->getParent(), *MemsetBB = Memset->getParent();
2190 if (MallocBB == MemsetBB)
2192 auto *Ptr = Memset->getArgOperand(0);
2193 auto *TI = MallocBB->getTerminator();
2199 if (MemsetBB != FalseBB)
2210 assert(Func == LibFunc_malloc || !ZeroedVariantName.
empty());
2211 Value *Calloc =
nullptr;
2212 if (!ZeroedVariantName.
empty()) {
2213 LLVMContext &Ctx =
Malloc->getContext();
2214 AttributeList
Attrs = InnerCallee->getAttributes();
2216 Attrs.getFnAttr(Attribute::AllocKind).getAllocKind() |
2217 AllocFnKind::Zeroed;
2220 Attrs.addFnAttribute(Ctx, Attribute::getWithAllocKind(Ctx, AllocKind))
2221 .removeFnAttribute(Ctx,
"alloc-variant-zeroed");
2222 FunctionCallee ZeroedVariant =
Malloc->getModule()->getOrInsertFunction(
2223 ZeroedVariantName, InnerCallee->getFunctionType(), Attrs);
2225 ->setCallingConv(
Malloc->getCallingConv());
2228 CallInst *CI = IRB.CreateCall(ZeroedVariant, Args, ZeroedVariantName);
2232 Type *SizeTTy =
Malloc->getArgOperand(0)->getType();
2233 Calloc =
emitCalloc(ConstantInt::get(SizeTTy, 1),
Malloc->getArgOperand(0),
2234 IRB, TLI,
Malloc->getType()->getPointerAddressSpace());
2239 MemorySSAUpdater Updater(&MSSA);
2241 nullptr, MallocDef);
2243 Updater.insertDef(NewAccessMD,
true);
2244 Malloc->replaceAllUsesWith(Calloc);
2249bool DSEState::dominatingConditionImpliesValue(MemoryDef *Def) {
2252 Value *StorePtr = StoreI->getPointerOperand();
2253 Value *StoreVal = StoreI->getValueOperand();
2260 if (!BI || !BI->isConditional())
2266 if (BI->getSuccessor(0) == BI->getSuccessor(1))
2271 if (!
match(BI->getCondition(),
2287 if (Pred == ICmpInst::ICMP_EQ &&
2288 !DT.
dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(0)),
2292 if (Pred == ICmpInst::ICMP_NE &&
2293 !DT.
dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(1)),
2298 MemoryAccess *ClobAcc =
2301 return MSSA.
dominates(ClobAcc, LoadAcc);
2304bool DSEState::storeIsNoop(MemoryDef *Def,
const Value *DefUO) {
2308 Constant *StoredConstant =
nullptr;
2316 if (!isRemovable(DefI))
2319 if (StoredConstant) {
2324 if (InitC && InitC == StoredConstant)
2332 if (dominatingConditionImpliesValue(Def))
2336 if (LoadI->getPointerOperand() ==
Store->getOperand(1)) {
2340 if (LoadAccess ==
Def->getDefiningAccess())
2346 SetVector<MemoryAccess *> ToCheck;
2347 MemoryAccess *Current =
2355 for (
unsigned I = 1;
I < ToCheck.
size(); ++
I) {
2356 Current = ToCheck[
I];
2359 for (
auto &Use : PhiAccess->incoming_values())
2371 if (LoadAccess != Current)
2383 for (
auto OI : IOL) {
2385 MemoryLocation Loc = *getLocForWrite(DeadI);
2386 assert(isRemovable(DeadI) &&
"Expect only removable instruction");
2389 int64_t DeadStart = 0;
2394 if (IntervalMap.empty())
2401bool DSEState::eliminateRedundantStoresOfExistingValues() {
2402 bool MadeChange =
false;
2403 LLVM_DEBUG(
dbgs() <<
"Trying to eliminate MemoryDefs that write the "
2404 "already existing value\n");
2405 for (
auto *Def : MemDefs) {
2410 auto MaybeDefLoc = getLocForWrite(DefInst);
2411 if (!MaybeDefLoc || !isRemovable(DefInst))
2414 MemoryDef *UpperDef;
2418 if (
Def->isOptimized())
2426 auto IsRedundantStore = [&]() {
2434 auto UpperLoc = getLocForWrite(UpperInst);
2437 int64_t InstWriteOffset = 0;
2438 int64_t DepWriteOffset = 0;
2439 auto OR = isOverwrite(UpperInst, DefInst, *UpperLoc, *MaybeDefLoc,
2440 InstWriteOffset, DepWriteOffset);
2442 return StoredByte && StoredByte == MemSetI->getOperand(1) &&
2449 if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
2451 LLVM_DEBUG(
dbgs() <<
"DSE: Remove No-Op Store:\n DEAD: " << *DefInst
2454 NumRedundantStores++;
2461DSEState::getInitializesArgMemLoc(
const Instruction *
I) {
2467 SmallMapVector<Value *, SmallVector<ArgumentInitInfo, 2>, 2>
Arguments;
2473 ConstantRangeList Inits;
2485 Inits = ConstantRangeList();
2493 bool IsDeadOrInvisibleOnUnwind =
2496 ArgumentInitInfo InitInfo{Idx, IsDeadOrInvisibleOnUnwind, Inits};
2497 bool FoundAliasing =
false;
2498 for (
auto &[Arg, AliasList] :
Arguments) {
2504 FoundAliasing =
true;
2505 AliasList.push_back(InitInfo);
2510 FoundAliasing =
true;
2511 AliasList.push_back(ArgumentInitInfo{Idx, IsDeadOrInvisibleOnUnwind,
2512 ConstantRangeList()});
2521 auto IntersectedRanges =
2523 if (IntersectedRanges.empty())
2526 for (
const auto &Arg : Args) {
2527 for (
const auto &
Range : IntersectedRanges) {
2541std::pair<bool, bool>
2542DSEState::eliminateDeadDefs(
const MemoryLocationWrapper &KillingLocWrapper) {
2544 bool DeletedKillingLoc =
false;
2550 SmallSetVector<MemoryAccess *, 8> ToCheck;
2554 SmallPtrSet<MemoryAccess *, 8>
Deleted;
2555 [[maybe_unused]]
unsigned OrigNumSkipStores = SkipStores.size();
2560 for (
unsigned I = 0;
I < ToCheck.
size();
I++) {
2561 MemoryAccess *Current = ToCheck[
I];
2562 if (
Deleted.contains(Current))
2564 std::optional<MemoryAccess *> MaybeDeadAccess = getDomMemoryDef(
2565 KillingLocWrapper.MemDef, Current, KillingLocWrapper.MemLoc,
2566 KillingLocWrapper.UnderlyingObject, ScanLimit, WalkerStepLimit,
2567 isMemTerminatorInst(KillingLocWrapper.DefInst), PartialLimit,
2568 KillingLocWrapper.DefByInitializesAttr);
2570 if (!MaybeDeadAccess) {
2574 MemoryAccess *DeadAccess = *MaybeDeadAccess;
2575 LLVM_DEBUG(
dbgs() <<
" Checking if we can kill " << *DeadAccess);
2577 LLVM_DEBUG(
dbgs() <<
"\n ... adding incoming values to worklist\n");
2586 if (PostOrderNumbers[IncomingBlock] > PostOrderNumbers[PhiBlock])
2587 ToCheck.
insert(IncomingAccess);
2598 MemoryDefWrapper DeadDefWrapper(
2602 assert(DeadDefWrapper.DefinedLocations.size() == 1);
2603 MemoryLocationWrapper &DeadLocWrapper =
2604 DeadDefWrapper.DefinedLocations.front();
2607 NumGetDomMemoryDefPassed++;
2611 if (isMemTerminatorInst(KillingLocWrapper.DefInst)) {
2612 if (KillingLocWrapper.UnderlyingObject != DeadLocWrapper.UnderlyingObject)
2615 << *DeadLocWrapper.DefInst <<
"\n KILLER: "
2616 << *KillingLocWrapper.DefInst <<
'\n');
2622 int64_t KillingOffset = 0;
2623 int64_t DeadOffset = 0;
2624 OverwriteResult
OR =
2625 isOverwrite(KillingLocWrapper.DefInst, DeadLocWrapper.DefInst,
2626 KillingLocWrapper.MemLoc, DeadLocWrapper.MemLoc,
2627 KillingOffset, DeadOffset);
2628 if (OR == OW_MaybePartial) {
2629 auto &IOL = IOLs[DeadLocWrapper.DefInst->
getParent()];
2631 KillingOffset, DeadOffset,
2632 DeadLocWrapper.DefInst, IOL);
2640 if (DeadSI && KillingSI && DT.
dominates(DeadSI, KillingSI)) {
2642 KillingSI, DeadSI, KillingOffset, DeadOffset,
DL, BatchAA,
2646 DeadSI->setOperand(0, Merged);
2647 ++NumModifiedStores;
2649 DeletedKillingLoc =
true;
2654 auto I = IOLs.find(DeadSI->getParent());
2655 if (
I != IOLs.end())
2656 I->second.erase(DeadSI);
2661 if (OR == OW_Complete) {
2663 << *DeadLocWrapper.DefInst <<
"\n KILLER: "
2664 << *KillingLocWrapper.DefInst <<
'\n');
2672 assert(SkipStores.size() - OrigNumSkipStores ==
Deleted.size() &&
2673 "SkipStores and Deleted out of sync?");
2675 return {
Changed, DeletedKillingLoc};
2678bool DSEState::eliminateDeadDefs(
const MemoryDefWrapper &KillingDefWrapper) {
2679 if (KillingDefWrapper.DefinedLocations.empty()) {
2680 LLVM_DEBUG(
dbgs() <<
"Failed to find analyzable write location for "
2681 << *KillingDefWrapper.DefInst <<
"\n");
2685 bool MadeChange =
false;
2686 for (
auto &KillingLocWrapper : KillingDefWrapper.DefinedLocations) {
2688 << *KillingLocWrapper.MemDef <<
" ("
2689 << *KillingLocWrapper.DefInst <<
")\n");
2690 auto [
Changed, DeletedKillingLoc] = eliminateDeadDefs(KillingLocWrapper);
2694 if (!DeletedKillingLoc && storeIsNoop(KillingLocWrapper.MemDef,
2695 KillingLocWrapper.UnderlyingObject)) {
2697 << *KillingLocWrapper.DefInst <<
'\n');
2699 NumRedundantStores++;
2704 if (!DeletedKillingLoc &&
2705 tryFoldIntoCalloc(KillingLocWrapper.MemDef,
2706 KillingLocWrapper.UnderlyingObject)) {
2707 LLVM_DEBUG(
dbgs() <<
"DSE: Remove memset after forming calloc:\n"
2708 <<
" DEAD: " << *KillingLocWrapper.DefInst <<
'\n');
2721 bool MadeChange =
false;
2722 DSEState State(
F,
AA, MSSA, DT, PDT, TLI, LI);
2724 for (
unsigned I = 0;
I < State.MemDefs.size();
I++) {
2726 if (State.SkipStores.count(KillingDef))
2729 MemoryDefWrapper KillingDefWrapper(
2730 KillingDef, State.getLocForInst(KillingDef->
getMemoryInst(),
2732 MadeChange |= State.eliminateDeadDefs(KillingDefWrapper);
2736 for (
auto &KV : State.IOLs)
2737 MadeChange |= State.removePartiallyOverlappedStores(KV.second);
2739 MadeChange |= State.eliminateRedundantStoresOfExistingValues();
2740 MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2742 while (!State.ToRemove.empty()) {
2743 Instruction *DeadInst = State.ToRemove.pop_back_val();
2763#ifdef LLVM_ENABLE_STATS
2791 if (skipFunction(
F))
2794 AliasAnalysis &
AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2795 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2797 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
2798 MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2800 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2801 LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2805#ifdef LLVM_ENABLE_STATS
2814 void getAnalysisUsage(AnalysisUsage &AU)
const override {
2833char DSELegacyPass::ID = 0;
2850 return new DSELegacyPass();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefInfo InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
MapVector< Instruction *, OverlapIntervalsTy > InstOverlapIntervalsTy
static bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller)
static cl::opt< bool > EnableInitializesImprovement("enable-dse-initializes-attr-improvement", cl::init(true), cl::Hidden, cl::desc("Enable the initializes attr improvement in DSE"))
static void shortenAssignment(Instruction *Inst, Value *OriginalDest, uint64_t OldOffsetInBits, uint64_t OldSizeInBits, uint64_t NewSizeInBits, bool IsOverwriteEnd)
static bool isShortenableAtTheEnd(Instruction *I)
Returns true if the end of this instruction can be safely shortened in length.
static bool isNoopIntrinsic(Instruction *I)
static ConstantRangeList getIntersectedInitRangeList(ArrayRef< ArgumentInitInfo > Args, bool CallHasNoUnwindAttr)
static cl::opt< bool > EnablePartialStoreMerging("enable-dse-partial-store-merging", cl::init(true), cl::Hidden, cl::desc("Enable partial store merging in DSE"))
static bool tryToShortenBegin(Instruction *DeadI, OverlapIntervalsTy &IntervalMap, int64_t &DeadStart, uint64_t &DeadSize)
std::map< int64_t, int64_t > OverlapIntervalsTy
static void pushMemUses(MemoryAccess *Acc, SmallVectorImpl< MemoryAccess * > &WorkList, SmallPtrSetImpl< MemoryAccess * > &Visited)
static bool isShortenableAtTheBeginning(Instruction *I)
Returns true if the beginning of this instruction can be safely shortened in length.
static cl::opt< unsigned > MemorySSADefsPerBlockLimit("dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden, cl::desc("The number of MemoryDefs we consider as candidates to eliminated " "other stores per basic block (default = 5000)"))
static Constant * tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI, int64_t KillingOffset, int64_t DeadOffset, const DataLayout &DL, BatchAAResults &AA, DominatorTree *DT)
static bool memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, BatchAAResults &AA, const DataLayout &DL, DominatorTree *DT)
Returns true if the memory which is accessed by the second instruction is not modified between the fi...
static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI, const Instruction *DeadI, BatchAAResults &AA)
Check if two instruction are masked stores that completely overwrite one another.
static cl::opt< unsigned > MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5), cl::Hidden, cl::desc("The cost of a step in a different basic " "block than the killing MemoryDef" "(default = 5)"))
static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart, uint64_t &DeadSize, int64_t KillingStart, uint64_t KillingSize, bool IsOverwriteEnd)
static cl::opt< unsigned > MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden, cl::desc("The number of memory instructions to scan for " "dead store elimination (default = 150)"))
static bool isFuncLocalAndNotCaptured(Value *Arg, const CallBase *CB, EarliestEscapeAnalysis &EA)
static cl::opt< unsigned > MemorySSASameBBStepCost("dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden, cl::desc("The cost of a step in the same basic block as the killing MemoryDef" "(default = 1)"))
static cl::opt< bool > EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking", cl::init(true), cl::Hidden, cl::desc("Enable partial-overwrite tracking in DSE"))
static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc, const MemoryLocation &DeadLoc, int64_t KillingOff, int64_t DeadOff, Instruction *DeadI, InstOverlapIntervalsTy &IOL)
Return 'OW_Complete' if a store to the 'KillingLoc' location completely overwrites a store to the 'De...
static cl::opt< unsigned > MemorySSAPartialStoreLimit("dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden, cl::desc("The maximum number candidates that only partially overwrite the " "killing MemoryDef to consider" " (default = 5)"))
static std::optional< TypeSize > getPointerSize(const Value *V, const DataLayout &DL, const TargetLibraryInfo &TLI, const Function *F)
static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap, int64_t &DeadStart, uint64_t &DeadSize)
static void adjustArgAttributes(AnyMemIntrinsic *Intrinsic, unsigned ArgNo, uint64_t PtrOffset)
Update the attributes given that a memory access is updated (the dereferenced pointer could be moved ...
static cl::opt< unsigned > MemorySSAUpwardsStepLimit("dse-memoryssa-walklimit", cl::init(90), cl::Hidden, cl::desc("The maximum number of steps while walking upwards to find " "MemoryDefs that may be killed (default = 90)"))
static cl::opt< bool > OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden, cl::desc("Allow DSE to optimize memory accesses."))
static bool hasInitializesAttr(Instruction *I)
static cl::opt< unsigned > MemorySSAPathCheckLimit("dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden, cl::desc("The maximum number of blocks to check when trying to prove that " "all paths to an exit go through a killing block (default = 50)"))
static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT, PostDominatorTree &PDT, const TargetLibraryInfo &TLI, const LoopInfo &LI)
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
early cse Early CSE w MemorySSA
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
static void deleteDeadInstruction(Instruction *I)
This file implements a map that provides insertion order iteration.
This file provides utility analysis objects describing memory locations.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
int64_t getSExtValue() const
Get sign extended value.
@ NoAlias
The two locations do not alias at all.
@ PartialAlias
The two locations alias, but only due to a partial overlap.
@ MustAlias
The two locations precisely alias each other.
constexpr int32_t getOffset() const
constexpr bool hasOffset() const
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
An immutable pass that tracks lazily created AssumptionCache objects.
This class stores enough information to efficiently remove some attributes from an existing AttrBuild...
AttributeMask & addAttribute(Attribute::AttrKind Val)
Add an attribute to the mask.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM_ABI ArrayRef< ConstantRange > getValueAsConstantRangeList() const
Return the attribute's value as a ConstantRange array.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB)
bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
unsigned arg_size() const
This class represents a list of constant ranges.
bool empty() const
Return true if this list contains no members.
LLVM_ABI ConstantRangeList intersectWith(const ConstantRangeList &CRL) const
Return the range list that results from the intersection of this ConstantRangeList with another Const...
const APInt & getLower() const
Return the lower value for this range.
const APInt & getUpper() const
Return the upper value for this range.
This is an important base class in LLVM.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
static DIAssignID * getDistinct(LLVMContext &Context)
DbgVariableFragmentInfo FragmentInfo
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
A parsed version of the target data layout string in and methods for querying it.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
DomTreeNodeBase * getIDom() const
Analysis pass which computes a DominatorTree.
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
Find nearest common dominator basic block for basic block A and B.
iterator_range< root_iterator > roots()
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Context-sensitive CaptureAnalysis provider, which computes and caches the earliest common dominator c...
void removeInstruction(Instruction *I)
CaptureComponents getCapturesBefore(const Value *Object, const Instruction *I, bool OrAt) override
Return how Object may be captured before instruction I, considering only provenance captures.
FunctionPass class - This class is used to implement most global optimizations.
const BasicBlock & getEntryBlock() const
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
bool isEquality() const
Return true if this predicate is either EQ or NE.
LLVM_ABI bool mayThrow(bool IncludePhaseOneUnwind=false) const LLVM_READONLY
Return true if this instruction may throw an exception.
LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
LLVM_ABI bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
const_iterator begin() const
bool empty() const
empty - Return true when no intervals are mapped.
const_iterator end() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
Analysis pass that exposes the LoopInfo for a function.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This class implements a map that also provides access to all stored values in a deterministic order.
Value * getLength() const
BasicBlock * getBlock() const
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
void setOptimized(MemoryAccess *MA)
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underl...
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
MemoryLocation getWithNewPtr(const Value *NewPtr) const
const Value * Ptr
The address of the start of the location.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static LLVM_ABI std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
static LLVM_ABI MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo *TLI)
Return a location representing a particular argument of a call.
An analysis that produces MemorySSA for a function.
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Legacy analysis pass which computes MemorySSA.
Encapsulates MemorySSA, including all data associated with memory accesses.
LLVM_ABI MemorySSAWalker * getSkipSelfWalker()
LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
LLVM_ABI MemorySSAWalker * getWalker()
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Instruction * getMemoryInst() const
Get the instruction that this MemoryUse represents.
PHITransAddr - An address value which tracks and handles phi translation.
LLVM_ABI Value * translateValue(BasicBlock *CurBB, BasicBlock *PredBB, const DominatorTree *DT, bool MustDominate)
translateValue - PHI translate the current address up the CFG from CurBB to Pred, updating our state ...
LLVM_ABI bool isPotentiallyPHITranslatable() const
isPotentiallyPHITranslatable - If this needs PHI translation, return true if we have some hope of doi...
bool needsPHITranslationFromBlock(BasicBlock *BB) const
needsPHITranslationFromBlock - Return true if moving from the specified BasicBlock to its predecessor...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Analysis pass which computes a PostDominatorTree.
PostDominatorTree Class - Concrete subclass of DominatorTree that is used to compute the post-dominat...
LLVM_ABI bool dominates(const Instruction *I1, const Instruction *I2) const
Return true if I1 dominates I2.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
size_type size() const
Determine the number of elements in the SetVector.
void insert_range(Range &&R)
bool insert(const value_type &X)
Insert a new element into the SetVector.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Value * getValueOperand()
constexpr bool empty() const
empty - Check if the string is empty.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
iterator_range< use_iterator > uses()
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ BasicBlock
Various leaf nodes.
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
OneOps_match< OpTy, Instruction::Load > m_Load(const OpTy &Op)
Matches LoadInst.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
LLVM_ABI bool calculateFragmentIntersect(const DataLayout &DL, const Value *Dest, uint64_t SliceOffsetInBits, uint64_t SliceSizeInBits, const DbgVariableRecord *DVRAssign, std::optional< DIExpression::FragmentInfo > &Result)
Calculate the fragment of the variable in DAI covered from (Dest + SliceOffsetInBits) to to (Dest + S...
initializer< Ty > init(const Ty &Val)
NodeAddr< DefNode * > Def
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI void initializeDSELegacyPassPass(PassRegistry &)
FunctionAddr VTableAddr Value
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isStrongerThanMonotonic(AtomicOrdering AO)
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
iterator_range< po_iterator< T > > post_order(const T &G)
LLVM_ABI bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
DomTreeNodeBase< BasicBlock > DomTreeNode
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
auto reverse(ContainerTy &&C)
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value \To if they are deeme...
bool isModSet(const ModRefInfo MRI)
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
FunctionAddr VTableAddr Count
LLVM_ABI bool AreStatisticsEnabled()
Check if statistics are enabled.
LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object, bool &RequiresNoCaptureBeforeUnwind)
Return true if Object memory is not visible after an unwind, in the sense that program semantics cann...
LLVM_ABI Value * emitCalloc(Value *Num, Value *Size, IRBuilderBase &B, const TargetLibraryInfo &TLI, unsigned AddrSpace)
Emit a call to the calloc function.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
LLVM_ABI bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI FunctionPass * createDeadStoreEliminationPass()
LLVM_ABI Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
auto predecessors(const MachineBasicBlock *BB)
bool capturesAnything(CaptureComponents CC)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool mayContainIrreducibleControl(const Function &F, const LoopInfo *LI)
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
bool capturesNothing(CaptureComponents CC)
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
bool isStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
Returns true if ao is stronger than other as defined by the AtomicOrdering lattice,...
bool isRefSet(const ModRefInfo MRI)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.