//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

static bool isAligned(const Value *Base, Align Alignment,
                      const DataLayout &DL) {
  return Base->getPointerAlignment(DL) >= Alignment;
}

static bool isDereferenceableAndAlignedPointerViaAssumption(
    const Value *Ptr, Align Alignment,
    function_ref<bool(const RetainedKnowledge &RK)> CheckSize,
    const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC,
    const DominatorTree *DT) {
  if (!CtxI)
    return false;
  /// Look through assumes to see if both dereferenceability and alignment can
  /// be proven by an assume if needed.
  RetainedKnowledge AlignRK;
  RetainedKnowledge DerefRK;
  bool PtrCanBeFreed = Ptr->canBeFreed();
  bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment;
  return getKnowledgeForValue(
      Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC,
      [&](RetainedKnowledge RK, Instruction *Assume, auto) {
        if (!isValidAssumeForContext(Assume, CtxI, DT))
          return false;
        if (RK.AttrKind == Attribute::Alignment)
          AlignRK = std::max(AlignRK, RK);

        // Dereferenceable information from assumptions is only valid if the
        // value cannot be freed between the assumption and use.
        if ((!PtrCanBeFreed || willNotFreeBetween(Assume, CtxI)) &&
            RK.AttrKind == Attribute::Dereferenceable)
          DerefRK = std::max(DerefRK, RK);
        IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
        if (IsAligned && DerefRK && CheckSize(DerefRK))
          return true; // We have found what we needed, so we stop looking.
        return false;  // Other assumes may have better information, so
                       // keep looking.
      });
}
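
// For illustration (a hypothetical IR fragment, not a test from the tree):
// \code
//   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16),
//                                    "dereferenceable"(ptr %p, i64 64)]
//   %v = load i32, ptr %p
// \endcode
// Both operand bundles surface as RetainedKnowledge entries here, so a query
// for a 16-byte-aligned access of at most 64 bytes via %p can succeed even
// when neither fact is derivable from %p's definition.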

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align,
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)) ||
        CheckForFreed)
      return false;
    if (CheckForNonNull &&
        !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
      return false;
    // When using something like !dereferenceable on a load, the
    // dereferenceability may only be valid on a specific control-flow path.
    // If the instruction doesn't dominate the context instruction, we're
    // asking about dereferenceability under the assumption that the
    // instruction has been speculated to the point of the context instruction,
    // in which case we don't know if the dereferenceability info still holds.
    // We don't bother handling allocas here, as they aren't speculatable
    // anyway.
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      return CtxI && isValidAssumeForContext(I, CtxI, DT);
    return true;
  };
  if (IsKnownDeref()) {
    // As we recursed through GEPs to get here, we've incrementally checked
    // that each step advanced by a multiple of the alignment. If our base is
    // properly aligned, then the original offset accessed must also be.
    return isAligned(V, Alignment, DL);
  }

  /// TODO: Refactor this function to be able to search independently for
  /// dereferenceability and alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        return isAligned(V, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  return isDereferenceableAndAlignedPointerViaAssumption(
      V, Alignment,
      [Size](const RetainedKnowledge &RK) {
        return RK.ArgValue >= Size.getZExtValue();
      },
      DL, CtxI, AC, DT);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}
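
// A minimal usage sketch (hypothetical caller; Ptr, DL, CtxI, AC, DT and TLI
// are assumed in scope): asking whether a 4-byte, 4-aligned access through
// Ptr may be speculated at CtxI:
// \code
//   APInt Size(DL.getIndexTypeSizeInBits(Ptr->getType()), 4);
//   bool Safe = isDereferenceableAndAlignedPointer(Ptr, Align(4), Size, DL,
//                                                  CtxI, &AC, &DT, &TLI);
// \endcode
// The analysis arguments only add precision and may also be passed as null.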

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<Instruction>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(
    LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
    AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  const SCEV *PtrSCEV = SE.getSCEV(Ptr);
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(
        Ptr, LI->getAlign(), EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(),
        AC, &DT);

  const SCEV *EltSizeSCEV = SE.getConstant(EltSize);
  return isDereferenceableAndAlignedInLoop(PtrSCEV, LI->getAlign(), EltSizeSCEV,
                                           L, SE, DT, AC, Predicates);
}
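
// Illustrative IR for the varying-address case (a sketch, not a test from
// the tree):
// \code
//   loop:
//     %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//     %gep = getelementptr inbounds i32, ptr %a, i64 %iv
//     %v = load i32, ptr %gep, align 4
// \endcode
// The pointer SCEV is the AddRec {%a,+,4}; the overload below then proves
// the whole range [%a, %a + 4 * trip-count) dereferenceable and aligned,
// which is what lets a transform speculate or widen the load.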

bool llvm::isDereferenceableAndAlignedInLoop(
    const SCEV *PtrSCEV, Align Alignment, const SCEV *EltSizeSCEV, Loop *L,
    ScalarEvolution &SE, DominatorTree &DT, AssumptionCache *AC,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrSCEV);

  // Check to see if we have a repeating access pattern and it's possible
  // to prove all accesses are well aligned.
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;

  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  const APInt &EltSize = cast<SCEVConstant>(EltSizeSCEV)->getAPInt();
  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;

  // TODO: Handle overlapping accesses.
  if (EltSize.ugt(Step->getAPInt().abs()))
    return false;

  const SCEV *MaxBECount =
      Predicates ? SE.getPredicatedSymbolicMaxBackedgeTakenCount(L, *Predicates)
                 : SE.getSymbolicMaxBackedgeTakenCount(L);
  const SCEV *BECount = Predicates
                            ? SE.getPredicatedBackedgeTakenCount(L, *Predicates)
                            : SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxBECount))
    return false;
  std::optional<ScalarEvolution::LoopGuards> LoopGuards;

  auto &DL = L->getHeader()->getDataLayout();
  const auto &[AccessStart, AccessEnd] =
      getStartAndEndForAccess(L, PtrSCEV, EltSizeSCEV, BECount, MaxBECount, &SE,
                              nullptr, &DT, AC, LoopGuards);
  if (isa<SCEVCouldNotCompute>(AccessStart) ||
      isa<SCEVCouldNotCompute>(AccessEnd))
    return false;

  // Try to get the access size.
  const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
  if (isa<SCEVCouldNotCompute>(PtrDiff))
    return false;

  if (!LoopGuards)
    LoopGuards.emplace(
        ScalarEvolution::LoopGuards::collect(AddRec->getLoop(), SE));

  APInt MaxPtrDiff =
      SE.getUnsignedRangeMax(SE.applyLoopGuards(PtrDiff, *LoopGuards));

  Value *Base = nullptr;
  APInt AccessSize;
  const SCEV *AccessSizeSCEV = nullptr;
  if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
    Base = NewBase->getValue();
    AccessSize = std::move(MaxPtrDiff);
    AccessSizeSCEV = PtrDiff;
  } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
    if (MinAdd->getNumOperands() != 2)
      return false;

    const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
    if (!Offset || !NewBase)
      return false;

    // The code below assumes the offset is unsigned, but GEP
    // offsets are treated as signed so we can end up with a signed value
    // here too. For example, suppose the initial PHI value is (i8 255),
    // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
    if (Offset->getAPInt().isNegative())
      return false;

    // For the moment, restrict ourselves to the case where the offset is a
    // multiple of the requested alignment and the base is aligned.
    // TODO: generalize if a case is found which warrants it.
    if (Offset->getAPInt().urem(Alignment.value()) != 0)
      return false;

    bool Overflow = false;
    AccessSize = MaxPtrDiff.uadd_ov(Offset->getAPInt(), Overflow);
    if (Overflow)
      return false;
    AccessSizeSCEV = SE.getAddExpr(PtrDiff, Offset);
    Base = NewBase->getValue();
  } else
    return false;

  Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
  if (BasicBlock *LoopPred = L->getLoopPredecessor()) {
    if (isa<BranchInst>(LoopPred->getTerminator()))
      CtxI = LoopPred->getTerminator();
  }
  return isDereferenceableAndAlignedPointerViaAssumption(
             Base, Alignment,
             [&SE, AccessSizeSCEV, &LoopGuards](const RetainedKnowledge &RK) {
               return SE.isKnownPredicate(
                   CmpInst::ICMP_ULE,
                   SE.applyLoopGuards(AccessSizeSCEV, *LoopGuards),
                   SE.applyLoopGuards(SE.getSCEV(RK.IRArgValue), *LoopGuards));
             },
             DL, CtxI, AC, &DT) ||
         isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            CtxI, AC, &DT);
}
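
// Worked sketch of the computation above (illustrative numbers): for an
// AddRec {%a,+,4} with EltSize = 4 and a backedge-taken count of n,
// getStartAndEndForAccess yields AccessStart = %a and AccessEnd =
// %a + 4 * (n + 1), so PtrDiff = 4 * (n + 1). Proving %a dereferenceable for
// that many bytes (directly, or via an assume checked with ICMP_ULE in the
// lambda) covers every access the loop performs.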

static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
  const Function &F = *CtxI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size, const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI)) {
    // With sanitizers `Dereferenceable` is not always enough for an
    // unconditional load.
    if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
      return true;
  }

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e., which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}
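
// Illustrative sketch of the block-scan fallback above (hypothetical IR):
// \code
//   %v0 = load i32, ptr %p, align 4
//   call void @use(i32 %v0)
//   ; is it safe to insert another "load i32, ptr %p, align 4" here?
// \endcode
// Even if nothing is known about %p statically, the earlier non-volatile,
// sufficiently aligned load of %p would already have trapped on an invalid
// address, so loading again at the scan point cannot introduce a new trap.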

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}
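
// For instance (an illustrative fragment), given
// \code
//   store i32 1, ptr %base
//   %gep = getelementptr inbounds i8, ptr %base, i64 8
//   %v = load i32, ptr %gep
// \endcode
// both pointers strip to %base with constant offsets 0 and 8, and the access
// ranges [0,4) and [8,12) do not intersect, so the store cannot clobber the
// load.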

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // Handle offsets.
    int64_t StoreOffset = 0, LoadOffset = 0;
    const Value *StoreBase =
        GetPointerBaseWithConstantOffset(MSI->getDest(), StoreOffset, DL);
    const Value *LoadBase =
        GetPointerBaseWithConstantOffset(Ptr, LoadOffset, DL);
    if (StoreBase != LoadBase || LoadOffset < StoreOffset)
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize + (LoadOffset - StoreOffset) * 8))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}
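
// A worked memset-forwarding case (illustrative IR, not a test from the
// tree):
// \code
//   call void @llvm.memset.p0.i64(ptr %p, i8 -85, i64 8, i1 false)
//   %v = load i32, ptr %p
// \endcode
// Val is 0xAB and the 4 loaded bytes lie inside the 8 memset bytes, so the
// splat APInt::getSplat(32, 0xAB) == 0xABABABAB is returned in place of %v.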

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to the expected value in case the next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}
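
// Usage sketch (hypothetical caller; LI and BatchAA are assumed in scope):
// a pass holding a BatchAAResults can try to reuse an earlier value:
// \code
//   bool IsLoadCSE = false;
//   if (Value *V = FindAvailableLoadedValue(LI, BatchAA, &IsLoadCSE)) {
//     // Real callers also insert a cast when V's type merely round-trips to
//     // LI's type; this sketch assumes the types already match.
//     LI->replaceAllUsesWith(V);
//     LI->eraseFromParent();
//   }
// \endcode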

// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that only
// feeds into them.
static bool isPointerUseReplacable(const Use &U, bool HasNonAddressBits) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst>(User))
      continue;
    // FIXME: The PtrToIntInst case here is not strictly correct, as it
    // changes which provenance is exposed.
    if (!HasNonAddressBits && isa<PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}

static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  // Conversely, replacing null in the default address space with the
  // destination pointer is always valid.
  if (isa<ConstantPointerNull>(From) &&
      From->getType()->getPointerAddressSpace() == 0)
    return true;
  if (isa<Constant>(To) && To->getType()->isPointerTy() &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}

bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  Type *Ty = To->getType();
  assert(U->getType() == Ty && "values must have matching types");
  // Not a pointer, just return true.
  if (!Ty->isPtrOrPtrVectorTy())
    return true;

  // Do not perform replacements in lifetime intrinsic arguments.
  if (isa<LifetimeIntrinsic>(U.getUser()))
    return false;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;

  bool HasNonAddressBits =
      DL.getAddressSizeInBits(Ty) != DL.getPointerTypeSizeInBits(Ty);
  return isPointerUseReplacable(U, HasNonAddressBits);
}

bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPtrOrPtrVectorTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}
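
// Context for the replacement predicates above (an illustrative fragment):
// \code
//   %c = icmp eq ptr %p, %q
//   br i1 %c, label %then, label %else
// \endcode
// In %then a pass may want to rewrite uses of %p into %q. Address equality
// does not imply provenance equality, so the rewrite is only performed for
// uses these helpers accept, e.g. comparisons and ptrtoint, provably
// dereferenceable constant destinations, or matching underlying objects.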

bool llvm::isReadOnlyLoop(
    Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    SmallVectorImpl<LoadInst *> &NonDereferenceableAndAlignedLoads,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          NonDereferenceableAndAlignedLoads.push_back(LI);
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() ||
                 I.mayThrow()) {
        return false;
      }
    }
  }
  return true;
}

LinearExpression llvm::decomposeLinearExpression(const DataLayout &DL,
                                                 Value *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Must be called with pointer arg");

  unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
  LinearExpression Expr(Ptr, BitWidth);

  while (true) {
    auto *GEP = dyn_cast<GEPOperator>(Expr.BasePtr);
    if (!GEP || GEP->getSourceElementType()->isScalableTy())
      return Expr;

    Value *VarIndex = nullptr;
    for (Value *Index : GEP->indices()) {
      if (isa<ConstantInt>(Index))
        continue;
      // Only allow a single variable index. We do not bother to handle the
      // case of the same variable index appearing multiple times.
      if (Expr.Index || VarIndex)
        return Expr;
      VarIndex = Index;
    }

    // Don't return non-canonical indexes.
    if (VarIndex && !VarIndex->getType()->isIntegerTy(BitWidth))
      return Expr;

    // We have verified that we can fully handle this GEP, so we can update
    // Expr members past this point.
    Expr.BasePtr = GEP->getPointerOperand();
    Expr.Flags = Expr.Flags.intersectForOffsetAdd(GEP->getNoWrapFlags());
    for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
         GTI != GTE; ++GTI) {
      Value *Index = GTI.getOperand();
      if (auto *ConstOffset = dyn_cast<ConstantInt>(Index)) {
        if (ConstOffset->isZero())
          continue;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = ConstOffset->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          Expr.Offset += SL->getElementOffset(ElementIdx);
          continue;
        }
        // Truncate if type size exceeds index space.
        APInt IndexedSize(BitWidth, GTI.getSequentialElementStride(DL),
                          /*isSigned=*/false,
                          /*implicitTrunc=*/true);
        Expr.Offset += ConstOffset->getValue() * IndexedSize;
        continue;
      }

      // FIXME: Also look through a mul/shl in the index.
      assert(Expr.Index == nullptr && "Shouldn't have index yet");
      Expr.Index = Index;
      // Truncate if type size exceeds index space.
      Expr.Scale = APInt(BitWidth, GTI.getSequentialElementStride(DL),
                         /*isSigned=*/false, /*implicitTrunc=*/true);
    }
  }

  return Expr;
}
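
// A worked decomposition (illustrative): for
// \code
//   %gep = getelementptr inbounds { i32, [8 x i16] }, ptr %base,
//                        i64 0, i32 1, i64 %i
// \endcode
// the walk produces BasePtr = %base, Offset = 4 (the field offset of the
// array member), Index = %i with Scale = 2 (the i16 stride), and Flags
// carrying the GEP's nowrap info; i.e. %gep == %base + 2 * %i + 4.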