LLVM 23.0.0git
Value.cpp
Go to the documentation of this file.
1//===-- Value.cpp - Implement the Value class -----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the Value, ValueHandle, and User classes.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/IR/Value.h"
14#include "LLVMContextImpl.h"
15#include "llvm/ADT/DenseMap.h"
17#include "llvm/IR/Constant.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/DataLayout.h"
20#include "llvm/IR/DebugInfo.h"
22#include "llvm/IR/DerivedUser.h"
24#include "llvm/IR/InstrTypes.h"
27#include "llvm/IR/Module.h"
28#include "llvm/IR/Operator.h"
30#include "llvm/IR/ValueHandle.h"
35#include <algorithm>
36
37using namespace llvm;
38
40 "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false),
41 cl::desc("Deref attributes and metadata infer facts at definition only"));
42
43//===----------------------------------------------------------------------===//
44// Value Class
45//===----------------------------------------------------------------------===//
/// Debug-only validation of a type that is about to become a Value's type.
/// Returns \p Ty unchanged so the call can be embedded in an initializer list.
static inline Type *checkType(Type *Ty) {
  assert(Ty && "Value defined with a null type: Error!");
  // TypedPointerType is only legal outside LLVM IR proper (e.g. SPIR-V
  // translation); no Value in IR may carry one, even as a vector element.
  assert(!isa<TypedPointerType>(Ty->getScalarType()) &&
         "Cannot have values with typed pointer types");
  return Ty;
}
52
/// Construct a Value of type \p ty with subclass id \p scid.
/// All bit-fields are zeroed here; debug builds additionally check that the
/// type is legal for the kind of value being created.
Value::Value(Type *ty, unsigned scid)
    : SubclassID(scid), HasValueHandle(0), SubclassOptionalData(0),
      SubclassData(0), NumUserOperands(0), IsUsedByMD(false), HasName(false),
      HasMetadata(false), VTy(checkType(ty)) {
  static_assert(ConstantFirstVal == 0, "!(SubclassID < ConstantFirstVal)");
  // FIXME: Why isn't this in the subclass gunk??
  // Note, we cannot call isa<CallInst> before the CallInst has been
  // constructed.
  // Recover the instruction opcode (if any) directly from the subclass id,
  // since isa<> is not usable on a partially-constructed object.
  unsigned OpCode = 0;
  if (SubclassID >= InstructionVal)
    OpCode = SubclassID - InstructionVal;
  if (OpCode == Instruction::Call || OpCode == Instruction::Invoke ||
      OpCode == Instruction::CallBr)
    assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
           "invalid CallBase type!");
  else if (SubclassID != BasicBlockVal &&
           (/*SubclassID < ConstantFirstVal ||*/ SubclassID > ConstantLastVal))
    assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
           "Cannot create non-first-class values except for constants!");
  // Keep Value small; the expected layout is two pointers + two unsigneds.
  static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
                "Value too big");
}
75
77 // Notify all ValueHandles (if present) that this value is going away.
78 if (HasValueHandle)
79 ValueHandleBase::ValueIsDeleted(this);
80 if (isUsedByMetadata())
81 ValueAsMetadata::handleDeletion(this);
82
83 // Remove associated metadata from context.
84 if (HasMetadata)
85 clearMetadata();
86
87#ifndef NDEBUG // Only in -g mode...
88 // Check to make sure that there are no uses of this value that are still
89 // around when the value is destroyed. If there are, then we have a dangling
90 // reference and something is wrong. This code is here to print out where
91 // the value is still being referenced.
92 //
93 // Note that use_empty() cannot be called here, as it eventually downcasts
94 // 'this' to GlobalValue (derived class of Value), but GlobalValue has already
95 // been destructed, so accessing it is UB.
96 //
97 if (!materialized_use_empty()) {
98 dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
99 for (auto *U : users())
100 dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";
101
102 llvm_unreachable("Uses remain when a value is destroyed!");
103 }
104#endif
105
106 // If this value is named, destroy the name. This should not be in a symtab
107 // at this point.
108 destroyValueName();
109}
110
/// Destroy this value through its most-derived type.
///
/// Value has no virtual destructor; instead the ValueID acts as a manual
/// vtable, dispatching to the concrete class's deleter via the .def files.
void Value::deleteValue() {
  switch (getValueID()) {
  // Ordinary values: delete through the concrete class.
#define HANDLE_VALUE(Name)                                                     \
  case Value::Name##Val:                                                       \
    delete static_cast<Name *>(this);                                          \
    break;
  // Memory values (e.g. MemorySSA accesses) are DerivedUsers with an
  // externally supplied deleter function.
#define HANDLE_MEMORY_VALUE(Name)                                              \
  case Value::Name##Val:                                                       \
    static_cast<DerivedUser *>(this)->DeleteValue(                             \
        static_cast<DerivedUser *>(this));                                     \
    break;
  // Constants are uniqued and owned by the LLVMContext; deleting one here
  // would corrupt the uniquing tables.
#define HANDLE_CONSTANT(Name)                                                  \
  case Value::Name##Val:                                                       \
    llvm_unreachable("constants should be destroyed with destroyConstant");    \
    break;
#define HANDLE_INSTRUCTION(Name) /* nothing */
#include "llvm/IR/Value.def"

  // Instructions are enumerated per-opcode rather than per-class.
#define HANDLE_INST(N, OPC, CLASS)                                             \
  case Value::InstructionVal + Instruction::OPC:                               \
    delete static_cast<CLASS *>(this);                                         \
    break;
#define HANDLE_USER_INST(N, OPC, CLASS)
#include "llvm/IR/Instruction.def"

  default:
    llvm_unreachable("attempting to delete unknown value kind");
  }
}
140
141void Value::destroyValueName() {
142 ValueName *Name = getValueName();
143 if (Name) {
144 MallocAllocator Allocator;
145 Name->Destroy(Allocator);
146 }
147 setValueName(nullptr);
148}
149
150bool Value::hasNUses(unsigned N) const {
151 if (!UseList)
152 return N == 0;
153
154 // TODO: Disallow for ConstantData and remove !UseList check?
155 return hasNItems(use_begin(), use_end(), N);
156}
157
158bool Value::hasNUsesOrMore(unsigned N) const {
159 // TODO: Disallow for ConstantData and remove !UseList check?
160 if (!UseList)
161 return N == 0;
162
163 return hasNItemsOrMore(use_begin(), use_end(), N);
164}
165
166bool Value::hasOneUser() const {
167 if (use_empty())
168 return false;
169 if (hasOneUse())
170 return true;
171 return std::equal(++user_begin(), user_end(), user_begin());
172}
173
/// Predicate used with hasNItems/hasNItemsOrMore: counts only users that
/// cannot be dropped.
static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }
175
176Use *Value::getSingleUndroppableUse() {
177 Use *Result = nullptr;
178 for (Use &U : uses()) {
179 if (!U.getUser()->isDroppable()) {
180 if (Result)
181 return nullptr;
182 Result = &U;
183 }
184 }
185 return Result;
186}
187
188User *Value::getUniqueUndroppableUser() {
189 User *Result = nullptr;
190 for (auto *U : users()) {
191 if (!U->isDroppable()) {
192 if (Result && Result != U)
193 return nullptr;
194 Result = U;
195 }
196 }
197 return Result;
198}
199
200bool Value::hasNUndroppableUses(unsigned int N) const {
201 return hasNItems(user_begin(), user_end(), N, isUnDroppableUser);
202}
203
204bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
205 return hasNItemsOrMore(user_begin(), user_end(), N, isUnDroppableUser);
206}
207
/// Drop every droppable use of this value for which \p ShouldDrop returns
/// true.
void Value::dropDroppableUses(
    llvm::function_ref<bool(const Use *)> ShouldDrop) {
  // Collect the uses first: dropping a use mutates the use list, so we must
  // not modify it while iterating over it.
  SmallVector<Use *, 8> ToBeEdited;
  for (Use &U : uses())
    if (U.getUser()->isDroppable() && ShouldDrop(&U))
      ToBeEdited.push_back(&U);
  for (Use *U : ToBeEdited)
    dropDroppableUse(*U);
}
217
218void Value::dropDroppableUsesIn(User &Usr) {
219 assert(Usr.isDroppable() && "Expected a droppable user!");
220 for (Use &UsrOp : Usr.operands()) {
221 if (UsrOp.get() == this)
222 dropDroppableUse(UsrOp);
223 }
224}
225
/// Neutralize a single droppable use. Currently the only droppable users are
/// llvm.assume calls.
void Value::dropDroppableUse(Use &U) {
  if (auto *Assume = dyn_cast<AssumeInst>(U.getUser())) {
    unsigned OpNo = U.getOperandNo();
    if (OpNo == 0)
      // Operand 0 is the assumed condition; dropping it turns the assume
      // into "assume true".
      U.set(ConstantInt::getTrue(Assume->getContext()));
    else {
      // An operand-bundle operand: poison the value and retag the enclosing
      // bundle as "ignore" so consumers of the assume skip it.
      U.set(PoisonValue::get(U.get()->getType()));
      CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpNo);
      BOI.Tag = Assume->getContext().pImpl->getOrInsertBundleTag("ignore");
    }
    return;
  }

  llvm_unreachable("unknown droppable use");
}
241
/// Return true if some user of this value is an instruction inside \p BB.
bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
  assert(hasUseList() && "ConstantData has no use-list");

  // This can be computed either by scanning the instructions in BB, or by
  // scanning the use list of this Value. Both lists can be very long, but
  // usually one is quite short.
  //
  // Scan both lists simultaneously until one is exhausted. This limits the
  // search to the shorter list.
  BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
  const_user_iterator UI = user_begin(), UE = user_end();
  for (; BI != BE && UI != UE; ++BI, ++UI) {
    // Scan basic block: Check if this Value is used by the instruction at BI.
    if (is_contained(BI->operands(), this))
      return true;
    // Scan use list: Check if the use at UI is in BB. (Non-instruction users,
    // e.g. constants, yield a null dyn_cast and are skipped.)
    const auto *User = dyn_cast<Instruction>(*UI);
    if (User && User->getParent() == BB)
      return true;
  }
  return false;
}
264
265unsigned Value::getNumUses() const {
266 // TODO: Disallow for ConstantData and remove !UseList check?
267 if (!UseList)
268 return 0;
269 return (unsigned)std::distance(use_begin(), use_end());
270}
271
/// Locate the symbol table (if any) that would hold \p V's name, storing it
/// in \p ST (which may come back null if the value is not attached to a
/// parent yet).
/// \returns true if \p V is a kind of value that can never be named (a
/// constant), false otherwise.
static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
  ST = nullptr;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Instruction names live in the enclosing function's table.
    if (BasicBlock *P = I->getParent())
      if (Function *PP = P->getParent())
        ST = PP->getValueSymbolTable();
  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
    if (Function *P = BB->getParent())
      ST = P->getValueSymbolTable();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Global names live in the module-level table.
    if (Module *P = GV->getParent())
      ST = &P->getValueSymbolTable();
  } else if (Argument *A = dyn_cast<Argument>(V)) {
    if (Function *P = A->getParent())
      ST = P->getValueSymbolTable();
  } else {
    assert(isa<Constant>(V) && "Unknown value type!");
    return true; // no name is setable for this.
  }
  return false;
}
293
/// Return this value's name entry, or null if unnamed.
/// Names are stored out of line in a per-context map keyed by the Value,
/// so Value itself pays only one bit (HasName) for naming support.
ValueName *Value::getValueName() const {
  if (!HasName) return nullptr;

  LLVMContext &Ctx = getContext();
  auto I = Ctx.pImpl->ValueNames.find(this);
  assert(I != Ctx.pImpl->ValueNames.end() &&
         "No name entry found!");

  return I->second;
}
304
/// Install (or clear, when \p VN is null) this value's name entry in the
/// per-context name map, keeping the HasName bit in sync.
void Value::setValueName(ValueName *VN) {
  LLVMContext &Ctx = getContext();

  assert(HasName == Ctx.pImpl->ValueNames.count(this) &&
         "HasName bit out of sync!");

  if (!VN) {
    // Clearing: drop any existing map entry.
    if (HasName)
      Ctx.pImpl->ValueNames.erase(this);
    HasName = false;
    return;
  }

  HasName = true;
  Ctx.pImpl->ValueNames[this] = VN;
}
321
322StringRef Value::getName() const {
323 // Make sure the empty string is still a C string. For historical reasons,
324 // some clients want to call .data() on the result and expect it to be null
325 // terminated.
326 if (!hasName())
327 return StringRef("", 0);
328 return getValueName()->getKey();
329}
330
/// Shared implementation of setName: updates the name and, if this value is
/// attached to a symbol table, keeps that table consistent (including
/// uniquing collided names).
void Value::setNameImpl(const Twine &NewName) {
  // Non-GlobalValue names may be discarded context-wide to save memory.
  bool NeedNewName =
      !getContext().shouldDiscardValueNames() || isa<GlobalValue>(this);

  // Fast-path: LLVMContext can be set to strip out non-GlobalValue names
  // and there is no need to delete the old name.
  if (!NeedNewName && !hasName())
    return;

  // Fast path for common IRBuilder case of setName("") when there is no name.
  if (NewName.isTriviallyEmpty() && !hasName())
    return;

  SmallString<256> NameData;
  StringRef NameRef = NeedNewName ? NewName.toStringRef(NameData) : "";
  assert(!NameRef.contains(0) && "Null bytes are not allowed in names");

  // Name isn't changing?
  if (getName() == NameRef)
    return;

  assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");

  // Get the symbol table to update for this object.
  ValueSymbolTable *ST;
  if (getSymTab(this, ST))
    return; // Cannot set a name on this value (e.g. constant).

  ValueName *NewValueName = nullptr;
  if (!ST) { // No symbol table to update? Just do the change.
    if (!NameRef.empty()) {
      // Create the new name.
      MallocAllocator Allocator;
      NewValueName = ValueName::create(NameRef, Allocator);
    }
    // NOTE: Could optimize for the case the name is shrinking to not deallocate
    // then reallocated.
    destroyValueName();

    if (NewValueName) {
      assert(NeedNewName);
      setValueName(NewValueName);
      // The name entry points back at its owning Value.
      getValueName()->setValue(this);
    }
    return;
  }

  // With a symbol table: create the (possibly uniqued) new entry first...
  if (!NameRef.empty())
    NewValueName = ST->createValueName(NameRef, this);

  // NOTE: Could optimize for the case the name is shrinking to not deallocate
  // then reallocated.
  // ...then retire the old entry.
  if (hasName()) {
    // Remove old name.
    ST->removeValueName(getValueName());
    destroyValueName();

    if (NameRef.empty())
      return;
  }

  // Name is changing to something new.
  assert(NeedNewName && NewValueName != nullptr);
  setValueName(NewValueName);
}
396
397void Value::setName(const Twine &NewName) {
398 setNameImpl(NewName);
399 if (Function *F = dyn_cast<Function>(this))
400 F->updateAfterNameChange();
401}
402
/// Transfer \p V's name to this value, leaving \p V unnamed. Handles every
/// combination of the two values being in the same, different, or no symbol
/// table.
void Value::takeName(Value *V) {
  assert(V != this && "Illegal call to this->takeName(this)!");
  ValueSymbolTable *ST = nullptr;
  // If this value has a name, drop it.
  if (hasName()) {
    // Get the symtab this is in.
    if (getSymTab(this, ST)) {
      // We can't set a name on this value, but we need to clear V's name if
      // it has one.
      if (V->hasName()) V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }

    // Remove old name.
    if (ST)
      ST->removeValueName(getValueName());
    destroyValueName();
  }

  // Now we know that this has no name.

  // If V has no name either, we're done.
  if (!V->hasName()) return;

  // Get this's symtab if we didn't before.
  if (!ST) {
    if (getSymTab(this, ST)) {
      // Clear V's name.
      V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }
  }

  // Get V's ST, this should always succeed, because V has a name.
  ValueSymbolTable *VST;
  bool Failure = getSymTab(V, VST);
  assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;

  // If these values are both in the same symtab, we can do this very fast.
  // This works even if both values have no symtab yet.
  if (ST == VST) {
    // Take the name!
    setValueName(V->getValueName());
    V->setValueName(nullptr);
    // Repoint the name entry at its new owner.
    getValueName()->setValue(this);
    return;
  }

  // Otherwise, things are slightly more complex. Remove V's name from VST and
  // then reinsert it into ST.

  if (VST)
    VST->removeValueName(V->getValueName());
  setValueName(V->getValueName());
  V->setValueName(nullptr);
  getValueName()->setValue(this);

  // Reinsertion may rename to resolve a collision in the new table.
  if (ST)
    ST->reinsertValue(this);
}
463
464std::string Value::getNameOrAsOperand() const {
465 if (!getName().empty())
466 return std::string(getName());
467
468 std::string BBName;
469 raw_string_ostream OS(BBName);
470 printAsOperand(OS, false);
471 return OS.str();
472}
473
474void Value::assertModuleIsMaterializedImpl() const {
475#ifndef NDEBUG
476 const GlobalValue *GV = dyn_cast<GlobalValue>(this);
477 if (!GV)
478 return;
479 const Module *M = GV->getParent();
480 if (!M)
481 return;
482 assert(M->isMaterialized());
483#endif
484}
485
486#ifndef NDEBUG
487static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
488 Constant *C) {
489 if (!Cache.insert(Expr).second)
490 return false;
491
492 for (auto &O : Expr->operands()) {
493 if (O == C)
494 return true;
495 auto *CE = dyn_cast<ConstantExpr>(O);
496 if (!CE)
497 continue;
498 if (contains(Cache, CE, C))
499 return true;
500 }
501 return false;
502}
503
504static bool contains(Value *Expr, Value *V) {
505 if (Expr == V)
506 return true;
507
508 auto *C = dyn_cast<Constant>(V);
509 if (!C)
510 return false;
511
512 auto *CE = dyn_cast<ConstantExpr>(Expr);
513 if (!CE)
514 return false;
515
516 SmallPtrSet<ConstantExpr *, 4> Cache;
517 return contains(Cache, CE, C);
518}
519#endif // NDEBUG
520
/// Core replace-all-uses-with implementation, optionally rewriting metadata
/// uses as well.
void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
  assert(hasUseList() && "Cannot replace constant data");
  assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceAllUsesWith(expr(this)) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceAllUses of value with new value of different type!");

  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsRAUWd(this, New);
  if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, New);

  // Peel uses off the head of the list one at a time; each rewrite detaches
  // the use from this value's list, so the loop makes progress.
  while (!materialized_use_empty()) {
    Use &U = *UseList;
    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {
        C->handleOperandChange(this, New);
        continue;
      }
    }

    U.set(New);
  }

  // A basic block's "uses" also include PHI incoming-block references in
  // successors; forward those to the replacement block too.
  if (BasicBlock *BB = dyn_cast<BasicBlock>(this))
    BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
}
552
/// Replace every use of this value, including metadata uses, with \p New.
void Value::replaceAllUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::Yes);
}
556
/// Replace every use of this value with \p New, except uses held by
/// metadata (which keep referring to the old value).
void Value::replaceNonMetadataUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::No);
}
560
/// Replace the uses of this value for which \p ShouldReplace returns true
/// with \p New. \returns true if anything changed.
bool Value::replaceUsesWithIf(Value *New,
                              llvm::function_ref<bool(Use &U)> ShouldReplace) {
  assert(New && "Value::replaceUsesWithIf(<null>) is invalid!");
  assert(New->getType() == getType() &&
         "replaceUses of value with new value of different type!");

  // Constants to rewrite are deferred; TrackingVH follows them if the
  // rewrite replaces/re-uniques an entry while the worklist is drained.
  SmallVector<TrackingVH<Constant>, 8> Consts;
  SmallPtrSet<Constant *, 8> Visited;

  bool Changed = false;
  for (Use &U : llvm::make_early_inc_range(uses())) {
    if (!ShouldReplace(U))
      continue;
    Changed = true;

    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {
        if (Visited.insert(C).second)
          Consts.push_back(TrackingVH<Constant>(C));
        continue;
      }
    }
    U.set(New);
  }

  while (!Consts.empty()) {
    // FIXME: handleOperandChange() updates all the uses in a given Constant,
    // not just the one passed to ShouldReplace
    Consts.pop_back_val()->handleOperandChange(this, New);
  }

  return Changed;
}
596
597/// Replace debug record uses of MetadataAsValue(ValueAsMetadata(V)) outside BB
598/// with New.
599static void replaceDbgUsesOutsideBlock(Value *V, Value *New, BasicBlock *BB) {
600 SmallVector<DbgVariableRecord *> DPUsers;
601 findDbgUsers(V, DPUsers);
602 for (auto *DVR : DPUsers) {
603 DbgMarker *Marker = DVR->getMarker();
604 if (Marker->getParent() != BB)
605 DVR->replaceVariableLocationOp(V, New);
606 }
607}
608
// Like replaceAllUsesWith except it does not handle constants or basic blocks.
// This routine leaves uses within BB.
void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
  assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceUses of value with new value of different type!");
  assert(BB && "Basic block that may contain a use of 'New' must be defined\n");

  // Debug records are not ordinary uses; handle them separately first.
  replaceDbgUsesOutsideBlock(this, New, BB);
  replaceUsesWithIf(New, [BB](Use &U) {
    auto *I = dyn_cast<Instruction>(U.getUser());
    // Don't replace if it's an instruction in the BB basic block.
    return !I || I->getParent() != BB;
  });
}
626
namespace {
// Various metrics for how much to strip off of pointers.
enum PointerStripKind {
  PSK_ZeroIndices,                   // Look through GEPs with all-zero indices.
  PSK_ZeroIndicesAndAliases,         // ...and also through global aliases.
  PSK_ZeroIndicesSameRepresentation, // Zero-index GEPs, but do not look
                                     // through addrspacecasts.
  PSK_ForAliasAnalysis,              // ...plus single-incoming PHIs and
                                     // launder/strip.invariant.group calls.
  PSK_InBoundsConstantIndices,       // Inbounds GEPs with constant indices.
  PSK_InBounds                       // Any inbounds GEP.
};
} // end anonymous namespace
638
// Default do-nothing visitor used when the caller does not need to observe
// each intermediate value during pointer stripping.
template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}
640
641template <PointerStripKind StripKind>
643 const Value *V,
644 function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
645 if (!V->getType()->isPointerTy())
646 return V;
647
648 // Even though we don't look through PHI nodes, we could be called on an
649 // instruction in an unreachable block, which may be on a cycle.
650 SmallPtrSet<const Value *, 4> Visited;
651
652 Visited.insert(V);
653 do {
654 Func(V);
655 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
656 switch (StripKind) {
657 case PSK_ZeroIndices:
658 case PSK_ZeroIndicesAndAliases:
659 case PSK_ZeroIndicesSameRepresentation:
660 case PSK_ForAliasAnalysis:
661 if (!GEP->hasAllZeroIndices())
662 return V;
663 break;
664 case PSK_InBoundsConstantIndices:
665 if (!GEP->hasAllConstantIndices())
666 return V;
667 [[fallthrough]];
668 case PSK_InBounds:
669 if (!GEP->isInBounds())
670 return V;
671 break;
672 }
673 V = GEP->getPointerOperand();
674 } else if (Operator::getOpcode(V) == Instruction::BitCast) {
675 Value *NewV = cast<Operator>(V)->getOperand(0);
676 if (!NewV->getType()->isPointerTy())
677 return V;
678 V = NewV;
679 } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
680 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
681 // TODO: If we know an address space cast will not change the
682 // representation we could look through it here as well.
683 V = cast<Operator>(V)->getOperand(0);
684 } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(V)) {
685 V = cast<GlobalAlias>(V)->getAliasee();
686 } else if (StripKind == PSK_ForAliasAnalysis && isa<PHINode>(V) &&
687 cast<PHINode>(V)->getNumIncomingValues() == 1) {
688 V = cast<PHINode>(V)->getIncomingValue(0);
689 } else {
690 if (const auto *Call = dyn_cast<CallBase>(V)) {
691 if (const Value *RV = Call->getReturnedArgOperand()) {
692 V = RV;
693 continue;
694 }
695 // The result of launder.invariant.group must alias it's argument,
696 // but it can't be marked with returned attribute, that's why it needs
697 // special case.
698 if (StripKind == PSK_ForAliasAnalysis &&
699 (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
700 Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
701 V = Call->getArgOperand(0);
702 continue;
703 }
704 }
705 return V;
706 }
707 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
708 } while (Visited.insert(V).second);
709
710 return V;
711}
712
713const Value *Value::stripPointerCasts() const {
715}
716
717const Value *Value::stripPointerCastsAndAliases() const {
719}
720
721const Value *Value::stripPointerCastsSameRepresentation() const {
723}
724
725const Value *Value::stripInBoundsConstantOffsets() const {
727}
728
729const Value *Value::stripPointerCastsForAliasAnalysis() const {
731}
732
/// Strip pointer casts, aliases and constant-offset GEPs off this pointer,
/// accumulating the total constant offset into \p Offset.
const Value *Value::stripAndAccumulateConstantOffsets(
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool AllowInvariantGroup,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis,
    bool LookThroughIntToPtr) const {
  if (!getType()->isPtrOrPtrVectorTy())
    return this;

  unsigned BitWidth = Offset.getBitWidth();
  assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
         "The offset bit width does not match the DL specification.");

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;
  Visited.insert(this);
  const Value *V = this;
  do {
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      // If in-bounds was requested, we do not strip non-in-bounds GEPs.
      if (!AllowNonInbounds && !GEP->isInBounds())
        return V;

      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
      APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
        return V;

      // Stop traversal if the pointer offset wouldn't fit in the bit-width
      // provided by the Offset argument. This can happen due to AddrSpaceCast
      // stripping.
      if (GEPOffset.getSignificantBits() > BitWidth)
        return V;

      // External Analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
      if (!ExternalAnalysis) {
        Offset += GEPOffsetST;
      } else {
        bool Overflow = false;
        APInt OldOffset = Offset;
        Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
        if (Overflow) {
          // Leave Offset untouched and stop at the overflowing GEP.
          Offset = std::move(OldOffset);
          return V;
        }
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
      // Interposable aliases may be replaced at link time; don't look through.
      if (!GA->isInterposable())
        V = GA->getAliasee();
    } else if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (const Value *RV = Call->getReturnedArgOperand())
        V = RV;
      if (AllowInvariantGroup && Call->isLaunderOrStripInvariantGroup())
        V = Call->getArgOperand(0);
    } else if (auto *Int2Ptr = dyn_cast<Operator>(V)) {
      // Try to accumulate across (inttoptr (add (ptrtoint p), off)).
      if (!AllowNonInbounds || !LookThroughIntToPtr || !Int2Ptr ||
          Int2Ptr->getOpcode() != Instruction::IntToPtr ||
          Int2Ptr->getOperand(0)->getType()->getScalarSizeInBits() != BitWidth)
        return V;

      auto *Add = dyn_cast<AddOperator>(Int2Ptr->getOperand(0));
      if (!Add)
        return V;

      auto *Ptr2Int = dyn_cast<PtrToIntOperator>(Add->getOperand(0));
      auto *CI = dyn_cast<ConstantInt>(Add->getOperand(1));
      if (!Ptr2Int || !CI)
        return V;

      Offset += CI->getValue();
      V = Ptr2Int->getOperand(0);
    }
    assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  return V;
}
822
823const Value *
824Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
826}
827
/// Conservatively determine whether the memory this pointer refers to could
/// be freed while it is still in scope. Returns true unless we can prove
/// deallocation is impossible.
bool Value::canBeFreed() const {

  // Cases that can simply never be deallocated
  // *) Constants aren't allocated per se, thus not deallocated either.
  if (isa<Constant>(this))
    return false;

  // Handle byval/byref/sret/inalloca/preallocated arguments. The storage
  // lifetime is guaranteed to be longer than the callee's lifetime.
  if (auto *A = dyn_cast<Argument>(this)) {
    if (A->hasPointeeInMemoryValueAttr())
      return false;
    // A pointer to an object in a function which neither frees, nor can arrange
    // for another thread to free on its behalf, can not be freed in the scope
    // of the function. Note that this logic is restricted to memory
    // allocations in existance before the call; a nofree function *is* allowed
    // to free memory it allocated.
    const Function *F = A->getParent();
    if (F->doesNotFreeMemory() && F->hasNoSync())
      return false;
  }

  // An inttoptr explicitly marked !nofree.
  if (isa<IntToPtrInst>(this) && getMetadata(LLVMContext::MD_nofree))
    return false;

  const Function *F = nullptr;
  if (auto *I = dyn_cast<Instruction>(this))
    F = I->getFunction();
  if (auto *A = dyn_cast<Argument>(this))
    F = A->getParent();

  // No enclosing function: be conservative.
  if (!F)
    return true;

  // With garbage collection, deallocation typically occurs solely at or after
  // safepoints. If we're compiling for a collector which uses the
  // gc.statepoint infrastructure, safepoints aren't explicitly present
  // in the IR until after lowering from abstract to physical machine model.
  // The collector could chose to mix explicit deallocation and gc'd objects
  // which is why we need the explicit opt in on a per collector basis.
  if (!F->hasGC())
    return true;

  const auto &GCName = F->getGC();
  if (GCName == "statepoint-example") {
    auto *PT = cast<PointerType>(this->getType());
    if (PT->getAddressSpace() != 1)
      // For the sake of this example GC, we arbitrarily pick addrspace(1) as
      // our GC managed heap. This must match the same check in
      // RewriteStatepointsForGC (and probably needs better factored.)
      return true;

    // It is cheaper to scan for a declaration than to scan for a use in this
    // function. Note that gc.statepoint is a type overloaded function so the
    // usual trick of requesting declaration of the intrinsic from the module
    // doesn't work.
    for (auto &Fn : *F->getParent())
      if (Fn.getIntrinsicID() == Intrinsic::experimental_gc_statepoint)
        return true;
    return false;
  }
  return true;
}
892
/// Return the number of bytes known to be dereferenceable through this
/// pointer, from attributes, metadata, or the pointee's known size.
/// \p CanBeNull is set if the guarantee comes from a *_or_null source;
/// \p CanBeFreed is set if the pointee could be deallocated first.
uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
                                               bool &CanBeNull,
                                               bool &CanBeFreed) const {
  assert(getType()->isPointerTy() && "must be pointer");

  uint64_t DerefBytes = 0;
  CanBeNull = false;
  CanBeFreed = UseDerefAtPointSemantics && canBeFreed();
  if (const Argument *A = dyn_cast<Argument>(this)) {
    DerefBytes = A->getDereferenceableBytes();
    if (DerefBytes == 0) {
      // Handle byval/byref/inalloca/preallocated arguments
      if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
        if (ArgMemTy->isSized()) {
          // FIXME: Why isn't this the type alloc size?
          DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinValue();
        }
      }
    }

    // Fall back to dereferenceable_or_null, which also permits null.
    if (DerefBytes == 0) {
      DerefBytes = A->getDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
    DerefBytes = Call->getRetDereferenceableBytes();
    if (DerefBytes == 0) {
      DerefBytes = Call->getRetDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    // Loads may carry !dereferenceable / !dereferenceable_or_null metadata.
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *IP = dyn_cast<IntToPtrInst>(this)) {
    // Same metadata forms apply to inttoptr.
    if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
    // An alloca of known size is dereferenceable, non-null, and never freed.
    if (std::optional<TypeSize> Size = AI->getAllocationSize(DL)) {
      DerefBytes = Size->getKnownMinValue();
      CanBeNull = false;
      CanBeFreed = false;
    }
  } else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
    if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
      // TODO: Don't outright reject hasExternalWeakLinkage but set the
      // CanBeNull flag.
      DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedValue();
      CanBeNull = false;
      CanBeFreed = false;
    }
  }
  return DerefBytes;
}
966
967Align Value::getPointerAlignment(const DataLayout &DL) const {
968 assert(getType()->isPointerTy() && "must be pointer");
969 if (const Function *F = dyn_cast<Function>(this)) {
970 Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
971 switch (DL.getFunctionPtrAlignType()) {
972 case DataLayout::FunctionPtrAlignType::Independent:
973 return FunctionPtrAlign;
974 case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
975 return std::max(FunctionPtrAlign, F->getAlign().valueOrOne());
976 }
977 llvm_unreachable("Unhandled FunctionPtrAlignType");
978 } else if (auto *GVar = dyn_cast<GlobalVariable>(this)) {
979 const MaybeAlign Alignment(GVar->getAlign());
980 if (!Alignment) {
981 Type *ObjectType = GVar->getValueType();
982 if (ObjectType->isSized()) {
983 // If the object is defined in the current Module, we'll be giving
984 // it the preferred alignment. Otherwise, we have to assume that it
985 // may only have the minimum ABI alignment.
986 if (GVar->isStrongDefinitionForLinker())
987 return DL.getPreferredAlign(GVar);
988 else
989 return DL.getABITypeAlign(ObjectType);
990 }
991 }
992 return Alignment.valueOrOne();
993 } else if (const Argument *A = dyn_cast<Argument>(this)) {
994 const MaybeAlign Alignment = A->getParamAlign();
995 if (!Alignment && A->hasStructRetAttr()) {
996 // An sret parameter has at least the ABI alignment of the return type.
997 Type *EltTy = A->getParamStructRetType();
998 if (EltTy->isSized())
999 return DL.getABITypeAlign(EltTy);
1000 }
1001 return Alignment.valueOrOne();
1002 } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
1003 return AI->getAlign();
1004 } else if (const auto *Call = dyn_cast<CallBase>(this)) {
1005 MaybeAlign Alignment = Call->getRetAlign();
1006 if (!Alignment && Call->getCalledFunction())
1007 Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
1008 return Alignment.valueOrOne();
1009 } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
1010 if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
1011 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
1012 return Align(CI->getLimitedValue());
1013 }
1014 } else if (auto *CE = dyn_cast<ConstantExpr>(this)) {
1015 // Determine the alignment of inttoptr(C).
1016 if (CE->getOpcode() == Instruction::IntToPtr &&
1017 isa<ConstantInt>(CE->getOperand(0))) {
1018 ConstantInt *IntPtr = cast<ConstantInt>(CE->getOperand(0));
1019 size_t TrailingZeros = IntPtr->getValue().countr_zero();
1020 // While the actual alignment may be large, elsewhere we have
1021 // an arbitrary upper alignmet limit, so let's clamp to it.
1022 return Align(TrailingZeros < Value::MaxAlignmentExponent
1023 ? uint64_t(1) << TrailingZeros
1024 : Value::MaximumAlignment);
1025 }
1026 }
1027 return Align(1);
1028}
1029
1030static std::optional<int64_t>
1031getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
1032 // Skip over the first indices.
1034 for (unsigned i = 1; i != Idx; ++i, ++GTI)
1035 /*skip along*/;
1036
1037 // Compute the offset implied by the rest of the indices.
1038 int64_t Offset = 0;
1039 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
1040 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
1041 if (!OpC)
1042 return std::nullopt;
1043 if (OpC->isZero())
1044 continue; // No offset.
1045
1046 // Handle struct indices, which add their field offset to the pointer.
1047 if (StructType *STy = GTI.getStructTypeOrNull()) {
1048 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
1049 continue;
1050 }
1051
1052 // Otherwise, we have a sequential type like an array or fixed-length
1053 // vector. Multiply the index by the ElementSize.
1054 TypeSize Size = GTI.getSequentialElementStride(DL);
1055 if (Size.isScalable())
1056 return std::nullopt;
1057 Offset += Size.getFixedValue() * OpC->getSExtValue();
1058 }
1059
1060 return Offset;
1061}
1062
1063std::optional<int64_t> Value::getPointerOffsetFrom(const Value *Other,
1064 const DataLayout &DL) const {
1065 const Value *Ptr1 = Other;
1066 const Value *Ptr2 = this;
1067 APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
1068 APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
1069 Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
1070 Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);
1071
1072 // Handle the trivial case first.
1073 if (Ptr1 == Ptr2)
1074 return Offset2.getSExtValue() - Offset1.getSExtValue();
1075
1076 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
1077 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
1078
1079 // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
1080 // base. After that base, they may have some number of common (and
1081 // potentially variable) indices. After that they handle some constant
1082 // offset, which determines their offset from each other. At this point, we
1083 // handle no other case.
1084 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
1085 GEP1->getSourceElementType() != GEP2->getSourceElementType())
1086 return std::nullopt;
1087
1088 // Skip any common indices and track the GEP types.
1089 unsigned Idx = 1;
1090 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
1091 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
1092 break;
1093
1094 auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
1095 auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
1096 if (!IOffset1 || !IOffset2)
1097 return std::nullopt;
1098 return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
1099 Offset1.getSExtValue();
1100}
1101
1102const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
1103 const BasicBlock *PredBB) const {
1104 auto *PN = dyn_cast<PHINode>(this);
1105 if (PN && PN->getParent() == CurBB)
1106 return PN->getIncomingValueForBlock(PredBB);
1107 return this;
1108}
1109
1110void Value::reverseUseList() {
1111 if (!UseList || !UseList->Next)
1112 // No need to reverse 0 or 1 uses.
1113 return;
1114
1115 Use *Head = UseList;
1116 Use *Current = UseList->Next;
1117 Head->Next = nullptr;
1118 while (Current) {
1119 Use *Next = Current->Next;
1120 Current->Next = Head;
1121 Head->Prev = &Current->Next;
1122 Head = Current;
1123 Current = Next;
1124 }
1125 UseList = Head;
1126 Head->Prev = &UseList;
1127}
1128
1129bool Value::isSwiftError() const {
1130 auto *Arg = dyn_cast<Argument>(this);
1131 if (Arg)
1132 return Arg->hasSwiftErrorAttr();
1133 auto *Alloca = dyn_cast<AllocaInst>(this);
1134 if (!Alloca)
1135 return false;
1136 return Alloca->isSwiftError();
1137}
1138
1139//===----------------------------------------------------------------------===//
1140// ValueHandleBase Class
1141//===----------------------------------------------------------------------===//
1142
1143void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
1144 assert(List && "Handle list is null?");
1145
1146 // Splice ourselves into the list.
1147 Next = *List;
1148 *List = this;
1149 setPrevPtr(List);
1150 if (Next) {
1151 Next->setPrevPtr(&Next);
1152 assert(getValPtr() == Next->getValPtr() && "Added to wrong list?");
1153 }
1154}
1155
1156void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
1157 assert(List && "Must insert after existing node");
1158
1159 Next = List->Next;
1160 setPrevPtr(&List->Next);
1161 List->Next = this;
1162 if (Next)
1163 Next->setPrevPtr(&Next);
1164}
1165
1166void ValueHandleBase::AddToUseList() {
1167 assert(getValPtr() && "Null pointer doesn't have a use list!");
1168
1169 LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
1170
1171 if (getValPtr()->HasValueHandle) {
1172 // If this value already has a ValueHandle, then it must be in the
1173 // ValueHandles map already.
1174 ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()];
1175 assert(Entry && "Value doesn't have any handles?");
1176 AddToExistingUseList(&Entry);
1177 return;
1178 }
1179
1180 // Ok, it doesn't have any handles yet, so we must insert it into the
1181 // DenseMap. However, doing this insertion could cause the DenseMap to
1182 // reallocate itself, which would invalidate all of the PrevP pointers that
1183 // point into the old table. Handle this by checking for reallocation and
1184 // updating the stale pointers only if needed.
1185 DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
1186 const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();
1187
1188 ValueHandleBase *&Entry = Handles[getValPtr()];
1189 assert(!Entry && "Value really did already have handles?");
1190 AddToExistingUseList(&Entry);
1191 getValPtr()->HasValueHandle = true;
1192
1193 // If reallocation didn't happen or if this was the first insertion, don't
1194 // walk the table.
1195 if (Handles.isPointerIntoBucketsArray(OldBucketPtr) ||
1196 Handles.size() == 1) {
1197 return;
1198 }
1199
1200 // Okay, reallocation did happen. Fix the Prev Pointers.
1201 for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
1202 E = Handles.end(); I != E; ++I) {
1203 assert(I->second && I->first == I->second->getValPtr() &&
1204 "List invariant broken!");
1205 I->second->setPrevPtr(&I->second);
1206 }
1207}
1208
1209void ValueHandleBase::RemoveFromUseList() {
1210 assert(getValPtr() && getValPtr()->HasValueHandle &&
1211 "Pointer doesn't have a use list!");
1212
1213 // Unlink this from its use list.
1214 ValueHandleBase **PrevPtr = getPrevPtr();
1215 assert(*PrevPtr == this && "List invariant broken");
1216
1217 *PrevPtr = Next;
1218 if (Next) {
1219 assert(Next->getPrevPtr() == &Next && "List invariant broken");
1220 Next->setPrevPtr(PrevPtr);
1221 return;
1222 }
1223
1224 // If the Next pointer was null, then it is possible that this was the last
1225 // ValueHandle watching VP. If so, delete its entry from the ValueHandles
1226 // map.
1227 LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
1228 DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
1229 if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
1230 Handles.erase(getValPtr());
1231 getValPtr()->HasValueHandle = false;
1232 }
1233}
1234
1235void ValueHandleBase::ValueIsDeleted(Value *V) {
1236 assert(V->HasValueHandle && "Should only be called if ValueHandles present");
1237
1238 // Get the linked list base, which is guaranteed to exist since the
1239 // HasValueHandle flag is set.
1240 LLVMContextImpl *pImpl = V->getContext().pImpl;
1241 ValueHandleBase *Entry = pImpl->ValueHandles[V];
1242 assert(Entry && "Value bit set but no entries exist");
1243
1244 // We use a local ValueHandleBase as an iterator so that ValueHandles can add
1245 // and remove themselves from the list without breaking our iteration. This
1246 // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
1247 // Note that we deliberately do not the support the case when dropping a value
1248 // handle results in a new value handle being permanently added to the list
1249 // (as might occur in theory for CallbackVH's): the new value handle will not
1250 // be processed and the checking code will mete out righteous punishment if
1251 // the handle is still present once we have finished processing all the other
1252 // value handles (it is fine to momentarily add then remove a value handle).
1253 for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
1254 Iterator.RemoveFromUseList();
1255 Iterator.AddToExistingUseListAfter(Entry);
1256 assert(Entry->Next == &Iterator && "Loop invariant broken.");
1257
1258 switch (Entry->getKind()) {
1259 case Assert:
1260 break;
1261 case Weak:
1262 case WeakTracking:
1263 // WeakTracking and Weak just go to null, which unlinks them
1264 // from the list.
1265 Entry->operator=(nullptr);
1266 break;
1267 case Callback:
1268 // Forward to the subclass's implementation.
1269 static_cast<CallbackVH*>(Entry)->deleted();
1270 break;
1271 }
1272 }
1273
1274 // All callbacks, weak references, and assertingVHs should be dropped by now.
1275 if (V->HasValueHandle) {
1276#ifndef NDEBUG // Only in +Asserts mode...
1277 dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
1278 << "\n";
1279 if (pImpl->ValueHandles[V]->getKind() == Assert)
1280 llvm_unreachable("An asserting value handle still pointed to this"
1281 " value!");
1282
1283#endif
1284 llvm_unreachable("All references to V were not removed?");
1285 }
1286}
1287
1288void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
1289 assert(Old->HasValueHandle &&"Should only be called if ValueHandles present");
1290 assert(Old != New && "Changing value into itself!");
1291 assert(Old->getType() == New->getType() &&
1292 "replaceAllUses of value with new value of different type!");
1293
1294 // Get the linked list base, which is guaranteed to exist since the
1295 // HasValueHandle flag is set.
1296 LLVMContextImpl *pImpl = Old->getContext().pImpl;
1297 ValueHandleBase *Entry = pImpl->ValueHandles[Old];
1298
1299 assert(Entry && "Value bit set but no entries exist");
1300
1301 // We use a local ValueHandleBase as an iterator so that
1302 // ValueHandles can add and remove themselves from the list without
1303 // breaking our iteration. This is not really an AssertingVH; we
1304 // just have to give ValueHandleBase some kind.
1305 for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
1306 Iterator.RemoveFromUseList();
1307 Iterator.AddToExistingUseListAfter(Entry);
1308 assert(Entry->Next == &Iterator && "Loop invariant broken.");
1309
1310 switch (Entry->getKind()) {
1311 case Assert:
1312 case Weak:
1313 // Asserting and Weak handles do not follow RAUW implicitly.
1314 break;
1315 case WeakTracking:
1316 // Weak goes to the new value, which will unlink it from Old's list.
1317 Entry->operator=(New);
1318 break;
1319 case Callback:
1320 // Forward to the subclass's implementation.
1321 static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New);
1322 break;
1323 }
1324 }
1325
1326#ifndef NDEBUG
1327 // If any new weak value handles were added while processing the
1328 // list, then complain about it now.
1329 if (Old->HasValueHandle)
1330 for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
1331 switch (Entry->getKind()) {
1332 case WeakTracking:
1333 dbgs() << "After RAUW from " << *Old->getType() << " %"
1334 << Old->getName() << " to " << *New->getType() << " %"
1335 << New->getName() << "\n";
1337 "A weak tracking value handle still pointed to the old value!\n");
1338 default:
1339 break;
1340 }
1341#endif
1342}
1343
1344// Pin the vtable to this file.
1345void CallbackVH::anchor() {}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
Hexagon Common GEP
Module.h This file contains the declarations for the Module class.
iv users
Definition IVUsers.cpp:48
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
const uint64_t BitWidth
#define P(N)
if(PassOpts->AAPipeline)
static StringRef getName(Value *V)
Basic Register Allocator
static std::optional< int64_t > getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL)
Definition Value.cpp:1031
static void NoopCallback(const Value *)
Definition Value.cpp:639
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
static cl::opt< bool > UseDerefAtPointSemantics("use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false), cl::desc("Deref attributes and metadata infer facts at definition only"))
static Type * checkType(Type *Ty)
Definition Value.cpp:46
static bool getSymTab(Value *V, ValueSymbolTable *&ST)
Definition Value.cpp:272
static const Value * stripPointerCastsAndOffsets(const Value *V, function_ref< void(const Value *)> Func=NoopCallback< StripKind >)
Definition Value.cpp:642
static void replaceDbgUsesOutsideBlock(Value *V, Value *New, BasicBlock *BB)
Replace debug record uses of MetadataAsValue(ValueAsMetadata(V)) outside BB with New.
Definition Value.cpp:599
static bool isUnDroppableUser(const User *U)
Definition Value.cpp:174
This file defines the SmallString class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1654
LLVM_ABI void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New)
Update all phi nodes in this basic block's successors to refer to basic block New instead of basic bl...
iterator end()
Definition BasicBlock.h:483
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
Definition Constants.h:269
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:174
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
LLVM_ABI const BasicBlock * getParent() const
bool erase(const KeyT &Val)
Definition DenseMap.h:330
unsigned size() const
Definition DenseMap.h:110
iterator begin()
Definition DenseMap.h:78
iterator end()
Definition DenseMap.h:81
bool isPointerIntoBucketsArray(const void *Ptr) const
isPointerIntoBucketsArray - Return true if the specified pointer points somewhere into the DenseMap's...
Definition DenseMap.h:360
const void * getPointerIntoBucketsArray() const
getPointerIntoBucketsArray() - Return an opaque pointer into the buckets array.
Definition DenseMap.h:367
LLVM_ABI Type * getSourceElementType() const
Definition Operator.cpp:82
bool hasExternalWeakLinkage() const
Module * getParent()
Get the module that this global value is contained inside of...
Type * getValueType() const
DenseMap< const Value *, ValueName * > ValueNames
ValueHandlesTy ValueHandles
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
void push_back(const T &Elt)
static StringMapEntry * create(StringRef key, AllocatorTy &allocator, InitTy &&...initVals)
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition StringRef.h:446
bool isTriviallyEmpty() const
Check if this twine is trivially empty; a false return value does not necessarily mean the twine is e...
Definition Twine.h:398
StringRef toStringRef(SmallVectorImpl< char > &Out) const
This returns the twine as a single StringRef if it can be represented as such.
Definition Twine.h:461
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
op_range operands()
Definition User.h:267
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:119
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
LLVM_ABI ~Value()
Value's destructor should be virtual by design, but that would require that Value and all of its subc...
Definition Value.cpp:76
TypeSize getSequentialElementStride(const DataLayout &DL) const
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ Entry
Definition COFF.h:862
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
initializer< Ty > init(const Ty &Val)
@ Assume
Do not drop type tests (default).
llvm::unique_function< void(llvm::Expected< T >)> Callback
A Callback<T> is a void function that accepts Expected<T>.
Definition Transport.h:139
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
bool empty() const
Definition BasicBlock.h:101
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
StringMapEntry< Value * > ValueName
Definition Value.h:56
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
bool hasNItemsOrMore(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;}, std::enable_if_t< !std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< std::remove_reference_t< decltype(Begin)> >::iterator_category >::value, void > *=nullptr)
Return true if the sequence [Begin, End) has N or more items.
Definition STLExtras.h:2638
bool hasNItems(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;}, std::enable_if_t< !std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< std::remove_reference_t< decltype(Begin)> >::iterator_category >::value, void > *=nullptr)
Return true if the sequence [Begin, End) has exactly N items.
Definition STLExtras.h:2613
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:364
generic_gep_type_iterator<> gep_type_iterator
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
gep_type_iterator gep_type_begin(const User *GEP)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
#define N
StringMapEntry< uint32_t > * Tag
The operand bundle tag, interned by LLVMContextImpl::getOrInsertBundleTag.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130