LLVM 23.0.0git
SafeStack.cpp
Go to the documentation of this file.
1//===- SafeStack.cpp - Safe Stack Insertion -------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
10// and the unsafe stack (explicitly allocated and managed through the runtime
11// support library).
12//
13// http://clang.llvm.org/docs/SafeStack.html
14//
15//===----------------------------------------------------------------------===//
16
18#include "SafeStackLayout.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/Statistic.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DIBuilder.h"
41#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/IRBuilder.h"
47#include "llvm/IR/Instruction.h"
50#include "llvm/IR/Intrinsics.h"
51#include "llvm/IR/MDBuilder.h"
52#include "llvm/IR/Metadata.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Use.h"
56#include "llvm/IR/Value.h"
58#include "llvm/Pass.h"
60#include "llvm/Support/Debug.h"
67#include <algorithm>
68#include <cassert>
69#include <cstdint>
70#include <optional>
71#include <string>
72
using namespace llvm;
using namespace llvm::safestack;

#define DEBUG_TYPE "safe-stack"

// Whole-pass counters, printed with -stats.
STATISTIC(NumFunctions, "Total number of functions");
STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
STATISTIC(NumUnsafeStackRestorePointsFunctions,
          "Number of functions that use setjmp or exceptions");

// Per-object counters.
STATISTIC(NumAllocas, "Total number of allocas");
STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");

/// Use __safestack_pointer_address even if the platform has a faster way of
/// access safe stack pointer.
static cl::opt<bool>
    SafeStackUsePointerAddress("safestack-use-pointer-address",
                               cl::init(false), cl::Hidden);

// Enables/disables reuse (coloring) of unsafe stack slots whose lifetimes do
// not overlap; on by default.
static cl::opt<bool> ClColoring("safe-stack-coloring",
                                cl::desc("enable safe stack coloring"),
                                cl::Hidden, cl::init(true));
98
namespace {

/// The SafeStack pass splits the stack of each function into the safe
/// stack, which is only accessed through memory safe dereferences (as
/// determined statically), and the unsafe stack, which contains all
/// local variables that are accessed in ways that we can't prove to
/// be safe.
class SafeStack {
  // Function being transformed and the analyses/target hooks it needs.
  Function &F;
  const TargetLoweringBase &TL;
  const LibcallLoweringInfo &Libcalls;
  const DataLayout &DL;
  DomTreeUpdater *DTU; // May be null; forwarded to block-splitting utilities.
  ScalarEvolution &SE; // Used to prove accesses stay within an object.

  // Frequently used types, cached from the DataLayout / LLVMContext.
  Type *StackPtrTy;
  Type *AddrTy;
  Type *Int32Ty;

  // Location holding the unsafe stack pointer; established in run().
  Value *UnsafeStackPtr = nullptr;

  /// Unsafe stack alignment. Each stack frame must ensure that the stack is
  /// aligned to this value. We need to re-align the unsafe stack if the
  /// alignment of any object on the stack exceeds this value.
  ///
  /// 16 seems like a reasonable upper bound on the alignment of objects that we
  /// might expect to appear on the stack on most common targets.
  static constexpr Align StackAlignment = Align::Constant<16>();

  /// Return the value of the stack canary.
  // NOTE(review): the declaration that belongs under this comment (upstream:
  // `Value *getStackGuard(IRBuilder<> &IRB, Function &F);`) appears to have
  // been dropped from this copy — confirm against upstream and restore.

  /// Load stack guard from the frame and check if it has changed.
  void checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
                       AllocaInst *StackGuardSlot, Value *StackGuard);

  /// Find all static allocas, dynamic allocas, return instructions and
  /// stack restore points (exception unwind blocks and setjmp calls) in the
  /// given function and append them to the respective vectors.
  void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
                 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
                 SmallVectorImpl<Argument *> &ByValArguments,
                 // NOTE(review): a `SmallVectorImpl<Instruction *> &Returns,`
                 // parameter line appears to be missing here — the definition
                 // below takes it. Confirm against upstream and restore.
                 SmallVectorImpl<Instruction *> &StackRestorePoints);

  /// Calculate the allocation size of a given alloca. Returns 0 if the
  /// size can not be statically determined.
  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);

  /// Allocate space for all static allocas in \p StaticAllocas,
  /// replace allocas with pointers into the unsafe stack.
  ///
  /// \returns A pointer to the top of the unsafe stack after all unsafe static
  /// allocas are allocated.
  Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
                                        ArrayRef<AllocaInst *> StaticAllocas,
                                        ArrayRef<Argument *> ByValArguments,
                                        Instruction *BasePointer,
                                        AllocaInst *StackGuardSlot);

  /// Generate code to restore the stack after all stack restore points
  /// in \p StackRestorePoints.
  ///
  /// \returns A local variable in which to maintain the dynamic top of the
  /// unsafe stack if needed.
  AllocaInst *
  createStackRestorePoints(IRBuilder<> &IRB, Function &F,
                           ArrayRef<Instruction *> StackRestorePoints,
                           Value *StaticTop, bool NeedDynamicTop);

  /// Replace all allocas in \p DynamicAllocas with code to allocate
  /// space dynamically on the unsafe stack and store the dynamic unsafe stack
  /// top to \p DynamicTop if non-null.
  void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
                                       AllocaInst *DynamicTop,
                                       ArrayRef<AllocaInst *> DynamicAllocas);

  // True if every use of the object is provably in-bounds (safe stack).
  bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);

  // Safety checks for individual accesses / mem intrinsics against the
  // object's statically known size.
  bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
                          const Value *AllocaPtr, uint64_t AllocaSize);
  bool IsAccessSafe(Value *Addr, TypeSize Size, const Value *AllocaPtr,
                    uint64_t AllocaSize);
  bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
                    uint64_t AllocaSize);

  // Inlining heuristic for the __safestack_pointer_address call.
  bool ShouldInlinePointerAddress(CallInst &CI);
  void TryInlinePointerAddress();

public:
  SafeStack(Function &F, const TargetLoweringBase &TL,
            const LibcallLoweringInfo &Libcalls, const DataLayout &DL,
            // NOTE(review): a `DomTreeUpdater *DTU, ScalarEvolution &SE)`
            // parameter line appears to be missing here (the init-list below
            // uses DTU and SE). Confirm against upstream and restore.
      : F(F), TL(TL), Libcalls(Libcalls), DL(DL), DTU(DTU), SE(SE),
        StackPtrTy(DL.getAllocaPtrType(F.getContext())),
        AddrTy(DL.getAddressType(StackPtrTy)),
        Int32Ty(Type::getInt32Ty(F.getContext())) {}

  // Run the transformation on the associated function.
  // Returns whether the function was changed.
  bool run();
};
201
202uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
203 if (auto Size = AI->getAllocationSize(DL))
204 if (Size->isFixed())
205 return Size->getFixedValue();
206 return 0;
207}
208
209bool SafeStack::IsAccessSafe(Value *Addr, TypeSize AccessSize,
210 const Value *AllocaPtr, uint64_t AllocaSize) {
211 if (AccessSize.isScalable()) {
212 // In case we don't know the size at compile time we cannot verify if the
213 // access is safe.
214 return false;
215 }
216 return IsAccessSafe(Addr, AccessSize.getFixedValue(), AllocaPtr, AllocaSize);
217}
218
219bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
220 const Value *AllocaPtr, uint64_t AllocaSize) {
221 const SCEV *AddrExpr = SE.getSCEV(Addr);
222 const auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(AddrExpr));
223 if (!Base || Base->getValue() != AllocaPtr) {
225 dbgs() << "[SafeStack] "
226 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
227 << *AllocaPtr << "\n"
228 << "SCEV " << *AddrExpr << " not directly based on alloca\n");
229 return false;
230 }
231
232 const SCEV *Expr = SE.removePointerBase(AddrExpr);
233 uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType());
234 ConstantRange AccessStartRange = SE.getUnsignedRange(Expr);
235 ConstantRange SizeRange =
236 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
237 ConstantRange AccessRange = AccessStartRange.add(SizeRange);
238 ConstantRange AllocaRange =
239 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
240 bool Safe = AllocaRange.contains(AccessRange);
241
243 dbgs() << "[SafeStack] "
244 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
245 << *AllocaPtr << "\n"
246 << " Access " << *Addr << "\n"
247 << " SCEV " << *Expr
248 << " U: " << SE.getUnsignedRange(Expr)
249 << ", S: " << SE.getSignedRange(Expr) << "\n"
250 << " Range " << AccessRange << "\n"
251 << " AllocaRange " << AllocaRange << "\n"
252 << " " << (Safe ? "safe" : "unsafe") << "\n");
253
254 return Safe;
255}
256
257bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
258 const Value *AllocaPtr,
259 uint64_t AllocaSize) {
260 if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
261 if (MTI->getRawSource() != U && MTI->getRawDest() != U)
262 return true;
263 } else {
264 if (MI->getRawDest() != U)
265 return true;
266 }
267
268 auto Len = MI->getLengthInBytes();
269 // Non-constant size => unsafe. FIXME: try SCEV getRange.
270 if (!Len) return false;
271 return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
272}
273
274/// Check whether a given allocation must be put on the safe
275/// stack or not. The function analyzes all uses of AI and checks whether it is
276/// only accessed in a memory safe way (as decided statically).
277bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
278 // Go through all uses of this alloca and check whether all accesses to the
279 // allocated object are statically known to be memory safe and, hence, the
280 // object can be placed on the safe stack.
281 SmallPtrSet<const Value *, 16> Visited;
282 SmallVector<const Value *, 8> WorkList;
283 WorkList.push_back(AllocaPtr);
284
285 // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
286 while (!WorkList.empty()) {
287 const Value *V = WorkList.pop_back_val();
288 for (const Use &UI : V->uses()) {
289 auto I = cast<const Instruction>(UI.getUser());
290 assert(V == UI.get());
291
292 switch (I->getOpcode()) {
293 case Instruction::Load:
294 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
295 AllocaSize))
296 return false;
297 break;
298
299 case Instruction::VAArg:
300 // "va-arg" from a pointer is safe.
301 break;
302 case Instruction::Store:
303 if (V == I->getOperand(0)) {
304 // Stored the pointer - conservatively assume it may be unsafe.
306 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
307 << "\n store of address: " << *I << "\n");
308 return false;
309 }
310
311 if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
312 AllocaPtr, AllocaSize))
313 return false;
314 break;
315
316 case Instruction::Ret:
317 // Information leak.
318 return false;
319
320 case Instruction::Call:
321 case Instruction::Invoke: {
322 const CallBase &CS = *cast<CallBase>(I);
323
324 if (I->isLifetimeStartOrEnd())
325 continue;
326
327 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
328 if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
330 << "[SafeStack] Unsafe alloca: " << *AllocaPtr
331 << "\n unsafe memintrinsic: " << *I << "\n");
332 return false;
333 }
334 continue;
335 }
336
337 // LLVM 'nocapture' attribute is only set for arguments whose address
338 // is not stored, passed around, or used in any other non-trivial way.
339 // We assume that passing a pointer to an object as a 'nocapture
340 // readnone' argument is safe.
341 // FIXME: a more precise solution would require an interprocedural
342 // analysis here, which would look at all uses of an argument inside
343 // the function being called.
344 auto B = CS.arg_begin(), E = CS.arg_end();
345 for (const auto *A = B; A != E; ++A)
346 if (A->get() == V)
347 if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
348 CS.doesNotAccessMemory()))) {
349 LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
350 << "\n unsafe call: " << *I << "\n");
351 return false;
352 }
353 continue;
354 }
355
356 default:
357 if (Visited.insert(I).second)
359 }
360 }
361 }
362
363 // All uses of the alloca are safe, we can place it on the safe stack.
364 return true;
365}
366
367Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
368 Value *StackGuardVar = TL.getIRStackGuard(IRB, Libcalls);
369 Module *M = F.getParent();
370
371 if (!StackGuardVar) {
372 TL.insertSSPDeclarations(*M, Libcalls);
373 return IRB.CreateIntrinsic(Intrinsic::stackguard, {});
374 }
375
376 return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
377}
378
379void SafeStack::findInsts(Function &F,
380 SmallVectorImpl<AllocaInst *> &StaticAllocas,
381 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
382 SmallVectorImpl<Argument *> &ByValArguments,
383 SmallVectorImpl<Instruction *> &Returns,
384 SmallVectorImpl<Instruction *> &StackRestorePoints) {
385 for (Instruction &I : instructions(&F)) {
386 if (auto AI = dyn_cast<AllocaInst>(&I)) {
387 ++NumAllocas;
388
389 uint64_t Size = getStaticAllocaAllocationSize(AI);
390 if (IsSafeStackAlloca(AI, Size))
391 continue;
392
393 if (AI->isStaticAlloca()) {
394 ++NumUnsafeStaticAllocas;
395 StaticAllocas.push_back(AI);
396 } else {
397 ++NumUnsafeDynamicAllocas;
398 DynamicAllocas.push_back(AI);
399 }
400 } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
401 if (CallInst *CI = I.getParent()->getTerminatingMustTailCall())
402 Returns.push_back(CI);
403 else
404 Returns.push_back(RI);
405 } else if (auto CI = dyn_cast<CallInst>(&I)) {
406 // setjmps require stack restore.
407 if (CI->getCalledFunction() && CI->canReturnTwice())
408 StackRestorePoints.push_back(CI);
409 } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
410 // Exception landing pads require stack restore.
411 StackRestorePoints.push_back(LP);
412 } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
413 if (II->getIntrinsicID() == Intrinsic::gcroot)
415 "gcroot intrinsic not compatible with safestack attribute");
416 }
417 }
418 for (Argument &Arg : F.args()) {
419 if (!Arg.hasByValAttr())
420 continue;
421 uint64_t Size = DL.getTypeStoreSize(Arg.getParamByValType());
422 if (IsSafeStackAlloca(&Arg, Size))
423 continue;
424
425 ++NumUnsafeByValArguments;
426 ByValArguments.push_back(&Arg);
427 }
428}
429
430AllocaInst *
431SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
432 ArrayRef<Instruction *> StackRestorePoints,
433 Value *StaticTop, bool NeedDynamicTop) {
434 assert(StaticTop && "The stack top isn't set.");
435
436 if (StackRestorePoints.empty())
437 return nullptr;
438
439 // We need the current value of the shadow stack pointer to restore
440 // after longjmp or exception catching.
441
442 // FIXME: On some platforms this could be handled by the longjmp/exception
443 // runtime itself.
444
445 AllocaInst *DynamicTop = nullptr;
446 if (NeedDynamicTop) {
447 // If we also have dynamic alloca's, the stack pointer value changes
448 // throughout the function. For now we store it in an alloca.
449 DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
450 "unsafe_stack_dynamic_ptr");
451 IRB.CreateStore(StaticTop, DynamicTop);
452 }
453
454 // Restore current stack pointer after longjmp/exception catch.
455 for (Instruction *I : StackRestorePoints) {
456 ++NumUnsafeStackRestorePoints;
457
458 IRB.SetInsertPoint(I->getNextNode());
459 Value *CurrentTop =
460 DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop) : StaticTop;
461 IRB.CreateStore(CurrentTop, UnsafeStackPtr);
462 }
463
464 return DynamicTop;
465}
466
467void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, Instruction &RI,
468 AllocaInst *StackGuardSlot, Value *StackGuard) {
469 Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot);
470 Value *Cmp = IRB.CreateICmpNE(StackGuard, V);
471
474 MDNode *Weights = MDBuilder(F.getContext())
475 .createBranchWeights(SuccessProb.getNumerator(),
476 FailureProb.getNumerator());
477 Instruction *CheckTerm =
478 SplitBlockAndInsertIfThen(Cmp, &RI, /* Unreachable */ true, Weights, DTU);
479 IRBuilder<> IRBFail(CheckTerm);
480 // FIXME: respect -fsanitize-trap / -ftrap-function here?
481 RTLIB::LibcallImpl StackChkFailImpl =
482 Libcalls.getLibcallImpl(RTLIB::STACKPROTECTOR_CHECK_FAIL);
483 if (StackChkFailImpl == RTLIB::Unsupported) {
484 F.getContext().emitError(
485 "no libcall available for stackprotector check fail");
486 return;
487 }
488
489 StringRef StackChkFailName =
491
492 FunctionCallee StackChkFail =
493 F.getParent()->getOrInsertFunction(StackChkFailName, IRB.getVoidTy());
494 IRBFail.CreateCall(StackChkFail, {});
495}
496
497/// We explicitly compute and set the unsafe stack layout for all unsafe
498/// static alloca instructions. We save the unsafe "base pointer" in the
499/// prologue into a local variable and restore it in the epilogue.
500Value *SafeStack::moveStaticAllocasToUnsafeStack(
501 IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
502 ArrayRef<Argument *> ByValArguments, Instruction *BasePointer,
503 AllocaInst *StackGuardSlot) {
504 if (StaticAllocas.empty() && ByValArguments.empty())
505 return BasePointer;
506
507 DIBuilder DIB(*F.getParent());
508
509 StackLifetime SSC(F, StaticAllocas, StackLifetime::LivenessType::May);
510 static const StackLifetime::LiveRange NoColoringRange(1, true);
511 if (ClColoring)
512 SSC.run();
513
514 for (const auto *I : SSC.getMarkers()) {
515 auto *Op = dyn_cast<Instruction>(I->getOperand(1));
516 const_cast<IntrinsicInst *>(I)->eraseFromParent();
517 // Remove the operand bitcast, too, if it has no more uses left.
518 if (Op && Op->use_empty())
519 Op->eraseFromParent();
520 }
521
522 // Unsafe stack always grows down.
523 StackLayout SSL(StackAlignment);
524 if (StackGuardSlot) {
525 SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
526 StackGuardSlot->getAlign(), SSC.getFullLiveRange());
527 }
528
529 for (Argument *Arg : ByValArguments) {
530 Type *Ty = Arg->getParamByValType();
531 uint64_t Size = DL.getTypeStoreSize(Ty);
532 if (Size == 0)
533 Size = 1; // Don't create zero-sized stack objects.
534
535 // Ensure the object is properly aligned.
536 Align Align = DL.getPrefTypeAlign(Ty);
537 if (auto A = Arg->getParamAlign())
538 Align = std::max(Align, *A);
539 SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
540 }
541
542 for (AllocaInst *AI : StaticAllocas) {
543 uint64_t Size = getStaticAllocaAllocationSize(AI);
544 if (Size == 0)
545 Size = 1; // Don't create zero-sized stack objects.
546
547 SSL.addObject(AI, Size, AI->getAlign(),
548 ClColoring ? SSC.getLiveRange(AI) : NoColoringRange);
549 }
550
551 SSL.computeLayout();
552 Align FrameAlignment = SSL.getFrameAlignment();
553
554 // FIXME: tell SSL that we start at a less-then-MaxAlignment aligned location
555 // (AlignmentSkew).
556 if (FrameAlignment > StackAlignment) {
557 // Re-align the base pointer according to the max requested alignment.
558 IRB.SetInsertPoint(BasePointer->getNextNode());
559 BasePointer = IRB.CreateIntrinsic(
560 StackPtrTy, Intrinsic::ptrmask,
561 {BasePointer, ConstantInt::get(AddrTy, ~(FrameAlignment.value() - 1))});
562 }
563
564 IRB.SetInsertPoint(BasePointer->getNextNode());
565
566 if (StackGuardSlot) {
567 unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
568 Value *Off =
569 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
570 Value *NewAI =
571 IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");
572
573 // Replace alloc with the new location.
574 StackGuardSlot->replaceAllUsesWith(NewAI);
575 StackGuardSlot->eraseFromParent();
576 }
577
578 for (Argument *Arg : ByValArguments) {
579 unsigned Offset = SSL.getObjectOffset(Arg);
580 MaybeAlign Align(SSL.getObjectAlignment(Arg));
581 Type *Ty = Arg->getParamByValType();
582
583 uint64_t Size = DL.getTypeStoreSize(Ty);
584 if (Size == 0)
585 Size = 1; // Don't create zero-sized stack objects.
586
587 Value *Off =
588 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
589 Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
590 Arg->getName() + ".unsafe-byval");
591
592 // Replace alloc with the new location.
593 replaceDbgDeclare(Arg, BasePointer, DIB, DIExpression::ApplyOffset,
594 -Offset);
595 Arg->replaceAllUsesWith(NewArg);
597 IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlign(), Size);
598 }
599
600 // Allocate space for every unsafe static AllocaInst on the unsafe stack.
601 for (AllocaInst *AI : StaticAllocas) {
602 IRB.SetInsertPoint(AI);
603 unsigned Offset = SSL.getObjectOffset(AI);
604
605 replaceDbgDeclare(AI, BasePointer, DIB, DIExpression::ApplyOffset, -Offset);
606 replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);
607
608 // Replace uses of the alloca with the new location.
609 // Insert address calculation close to each use to work around PR27844.
610 std::string Name = std::string(AI->getName()) + ".unsafe";
611 while (!AI->use_empty()) {
612 Use &U = *AI->use_begin();
613 Instruction *User = cast<Instruction>(U.getUser());
614
615 // Drop lifetime markers now that this is no longer an alloca.
616 // SafeStack has already performed its own stack coloring.
617 if (User->isLifetimeStartOrEnd()) {
618 User->eraseFromParent();
619 continue;
620 }
621
622 Instruction *InsertBefore;
623 if (auto *PHI = dyn_cast<PHINode>(User))
624 InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
625 else
626 InsertBefore = User;
627
628 IRBuilder<> IRBUser(InsertBefore);
629 Value *Off =
630 IRBUser.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -Offset));
631 Value *Replacement =
632 IRBUser.CreateAddrSpaceCast(Off, AI->getType(), Name);
633
634 if (auto *PHI = dyn_cast<PHINode>(User))
635 // PHI nodes may have multiple incoming edges from the same BB (why??),
636 // all must be updated at once with the same incoming value.
637 PHI->setIncomingValueForBlock(PHI->getIncomingBlock(U), Replacement);
638 else
639 U.set(Replacement);
640 }
641
642 AI->eraseFromParent();
643 }
644
645 // Re-align BasePointer so that our callees would see it aligned as
646 // expected.
647 // FIXME: no need to update BasePointer in leaf functions.
648 unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);
649
650 MDBuilder MDB(F.getContext());
652 Data.push_back(MDB.createString("unsafe-stack-size"));
653 Data.push_back(MDB.createConstant(ConstantInt::get(Int32Ty, FrameSize)));
654 MDNode *MD = MDTuple::get(F.getContext(), Data);
655 F.setMetadata(LLVMContext::MD_annotation, MD);
656
657 // Update shadow stack pointer in the function epilogue.
658 IRB.SetInsertPoint(BasePointer->getNextNode());
659
660 Value *StaticTop =
661 IRB.CreatePtrAdd(BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
662 "unsafe_stack_static_top");
663 IRB.CreateStore(StaticTop, UnsafeStackPtr);
664 return StaticTop;
665}
666
667void SafeStack::moveDynamicAllocasToUnsafeStack(
668 Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
669 ArrayRef<AllocaInst *> DynamicAllocas) {
670 DIBuilder DIB(*F.getParent());
671
672 for (AllocaInst *AI : DynamicAllocas) {
673 IRBuilder<> IRB(AI);
674
675 // Compute the new SP value (after AI).
676 Value *Size = IRB.CreateAllocationSize(AddrTy, AI);
677 Value *SP = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
678 SP = IRB.CreatePtrAdd(SP, IRB.CreateNeg(Size));
679
680 // Align the SP value to satisfy the AllocaInst and stack alignments.
681 auto Align = std::max(AI->getAlign(), StackAlignment);
682
683 Value *NewTop = IRB.CreateIntrinsic(
684 StackPtrTy, Intrinsic::ptrmask,
685 {SP, ConstantInt::getSigned(AddrTy, ~uint64_t(Align.value() - 1))});
686
687 // Save the stack pointer.
688 IRB.CreateStore(NewTop, UnsafeStackPtr);
689 if (DynamicTop)
690 IRB.CreateStore(NewTop, DynamicTop);
691
692 Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
693 if (AI->hasName() && isa<Instruction>(NewAI))
694 NewAI->takeName(AI);
695
697 AI->replaceAllUsesWith(NewAI);
698 AI->eraseFromParent();
699 }
700
701 if (!DynamicAllocas.empty()) {
702 // Now go through the instructions again, replacing stacksave/stackrestore.
703 for (Instruction &I : llvm::make_early_inc_range(instructions(&F))) {
704 auto *II = dyn_cast<IntrinsicInst>(&I);
705 if (!II)
706 continue;
707
708 if (II->getIntrinsicID() == Intrinsic::stacksave) {
709 IRBuilder<> IRB(II);
710 Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
711 LI->takeName(II);
712 II->replaceAllUsesWith(LI);
713 II->eraseFromParent();
714 } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
715 IRBuilder<> IRB(II);
716 Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
717 SI->takeName(II);
718 assert(II->use_empty());
719 II->eraseFromParent();
720 }
721 }
722 }
723}
724
725bool SafeStack::ShouldInlinePointerAddress(CallInst &CI) {
727 if (CI.hasFnAttr(Attribute::AlwaysInline) &&
728 isInlineViable(*Callee).isSuccess())
729 return true;
730 if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
731 CI.isNoInline())
732 return false;
733 return true;
734}
735
736void SafeStack::TryInlinePointerAddress() {
737 auto *CI = dyn_cast<CallInst>(UnsafeStackPtr);
738 if (!CI)
739 return;
740
741 if(F.hasOptNone())
742 return;
743
745 if (!Callee || Callee->isDeclaration())
746 return;
747
748 if (!ShouldInlinePointerAddress(*CI))
749 return;
750
751 InlineFunctionInfo IFI;
752 InlineFunction(*CI, IFI);
753}
754
755bool SafeStack::run() {
756 assert(F.hasFnAttribute(Attribute::SafeStack) &&
757 "Can't run SafeStack on a function without the attribute");
758 assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");
759
760 ++NumFunctions;
761
762 SmallVector<AllocaInst *, 16> StaticAllocas;
763 SmallVector<AllocaInst *, 4> DynamicAllocas;
764 SmallVector<Argument *, 4> ByValArguments;
765 SmallVector<Instruction *, 4> Returns;
766
767 // Collect all points where stack gets unwound and needs to be restored
768 // This is only necessary because the runtime (setjmp and unwind code) is
769 // not aware of the unsafe stack and won't unwind/restore it properly.
770 // To work around this problem without changing the runtime, we insert
771 // instrumentation to restore the unsafe stack pointer when necessary.
772 SmallVector<Instruction *, 4> StackRestorePoints;
773
774 // Find all static and dynamic alloca instructions that must be moved to the
775 // unsafe stack, all return instructions and stack restore points.
776 findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
777 StackRestorePoints);
778
779 if (StaticAllocas.empty() && DynamicAllocas.empty() &&
780 ByValArguments.empty() && StackRestorePoints.empty())
781 return false; // Nothing to do in this function.
782
783 if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
784 !ByValArguments.empty())
785 ++NumUnsafeStackFunctions; // This function has the unsafe stack.
786
787 if (!StackRestorePoints.empty())
788 ++NumUnsafeStackRestorePointsFunctions;
789
790 IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
791 // Calls must always have a debug location, or else inlining breaks. So
792 // we explicitly set a artificial debug location here.
793 if (DISubprogram *SP = F.getSubprogram())
795 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP));
797 // FIXME: A more correct implementation of SafeStackUsePointerAddress would
798 // change the libcall availability in RuntimeLibcallsInfo
799 StringRef SafestackPointerAddressName =
801 RTLIB::impl___safestack_pointer_address);
802
803 FunctionCallee Fn = F.getParent()->getOrInsertFunction(
804 SafestackPointerAddressName, IRB.getPtrTy(0));
805 UnsafeStackPtr = IRB.CreateCall(Fn);
806 } else {
807 UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB, Libcalls);
808 if (!UnsafeStackPtr) {
809 F.getContext().emitError(
810 "no location available for safestack pointer address");
811 UnsafeStackPtr = PoisonValue::get(StackPtrTy);
812 }
813 }
814
815 // Load the current stack pointer (we'll also use it as a base pointer).
816 // FIXME: use a dedicated register for it ?
817 Instruction *BasePointer =
818 IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
819 assert(BasePointer->getType() == StackPtrTy);
820
821 AllocaInst *StackGuardSlot = nullptr;
822 // FIXME: implement weaker forms of stack protector.
823 if (F.hasFnAttribute(Attribute::StackProtect) ||
824 F.hasFnAttribute(Attribute::StackProtectStrong) ||
825 F.hasFnAttribute(Attribute::StackProtectReq)) {
826 Value *StackGuard = getStackGuard(IRB, F);
827 StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
828 IRB.CreateStore(StackGuard, StackGuardSlot);
829
830 for (Instruction *RI : Returns) {
831 IRBuilder<> IRBRet(RI);
832 checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
833 }
834 }
835
836 // The top of the unsafe stack after all unsafe static allocas are
837 // allocated.
838 Value *StaticTop = moveStaticAllocasToUnsafeStack(
839 IRB, F, StaticAllocas, ByValArguments, BasePointer, StackGuardSlot);
840
841 // Safe stack object that stores the current unsafe stack top. It is updated
842 // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
843 // This is only needed if we need to restore stack pointer after longjmp
844 // or exceptions, and we have dynamic allocations.
845 // FIXME: a better alternative might be to store the unsafe stack pointer
846 // before setjmp / invoke instructions.
847 AllocaInst *DynamicTop = createStackRestorePoints(
848 IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
849
850 // Handle dynamic allocas.
851 moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
852 DynamicAllocas);
853
854 // Restore the unsafe stack pointer before each return.
855 for (Instruction *RI : Returns) {
856 IRB.SetInsertPoint(RI);
857 IRB.CreateStore(BasePointer, UnsafeStackPtr);
858 }
859
860 TryInlinePointerAddress();
861
862 LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
863 return true;
864}
865
/// Legacy pass-manager wrapper around the SafeStack transformation.
class SafeStackLegacyPass : public FunctionPass {
  const TargetMachine *TM = nullptr; // Fetched from TargetPassConfig per run.

public:
  static char ID; // Pass identification, replacement for typeid..

  SafeStackLegacyPass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LibcallLoweringInfoWrapper>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    // DT is only *preserved* (not required) — see the lazy computation below.
    AU.addPreserved<DominatorTreeWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");

    // Only instrument functions explicitly marked with the safestack
    // attribute; everything else is left untouched.
    if (!F.hasFnAttribute(Attribute::SafeStack)) {
      LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
                           " for this function\n");
      return false;
    }

    if (F.isDeclaration()) {
      LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
                           " is not available\n");
      return false;
    }

    TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F);
    auto *TL = Subtarget->getTargetLowering();
    if (!TL)
      report_fatal_error("TargetLowering instance is required");

    const LibcallLoweringInfo &Libcalls =
        getAnalysis<LibcallLoweringInfoWrapper>().getLibcallLowering(
            *F.getParent(), *Subtarget);

    auto *DL = &F.getDataLayout();
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

    // Compute DT and LI only for functions that have the attribute.
    // This is only useful because the legacy pass manager doesn't let us
    // compute analyzes lazily.

    DominatorTree *DT;
    bool ShouldPreserveDominatorTree;
    std::optional<DominatorTree> LazilyComputedDomTree;

    // Do we already have a DominatorTree available from the previous pass?
    // Note that we should *NOT* require it, to avoid the case where we end up
    // not needing it, but the legacy PM would have computed it for us anyways.
    if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
      DT = &DTWP->getDomTree();
      ShouldPreserveDominatorTree = true;
    } else {
      // Otherwise, we need to compute it.
      LazilyComputedDomTree.emplace(F);
      DT = &*LazilyComputedDomTree;
      ShouldPreserveDominatorTree = false;
    }

    // Likewise, lazily compute loop info.
    LoopInfo LI(*DT);

    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

    ScalarEvolution SE(F, TLI, ACT, *DT, LI);

    // Only hand the updater to the transform when the tree is shared with the
    // pass manager (and must therefore be kept up to date).
    return SafeStack(F, *TL, Libcalls, *DL,
                     ShouldPreserveDominatorTree ? &DTU : nullptr, SE)
        .run();
  }
};

} // end anonymous namespace
946
949 LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
950
951 if (!F.hasFnAttribute(Attribute::SafeStack)) {
952 LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested"
953 " for this function\n");
954 return PreservedAnalyses::all();
955 }
956
957 if (F.isDeclaration()) {
958 LLVM_DEBUG(dbgs() << "[SafeStack] function definition"
959 " is not available\n");
960 return PreservedAnalyses::all();
961 }
962
963 const TargetSubtargetInfo *Subtarget = TM->getSubtargetImpl(F);
964 auto *TL = Subtarget->getTargetLowering();
965
966 auto &DL = F.getDataLayout();
967
968 // preserve DominatorTree
969 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
970 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
971
972 auto &MAMProxy = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
973 const LibcallLoweringModuleAnalysisResult *LibcallLowering =
974 MAMProxy.getCachedResult<LibcallLoweringModuleAnalysis>(*F.getParent());
975
976 if (!LibcallLowering) {
977 F.getContext().emitError("'" + LibcallLoweringModuleAnalysis::name() +
978 "' analysis required");
979 return PreservedAnalyses::all();
980 }
981
982 const LibcallLoweringInfo &Libcalls =
983 LibcallLowering->getLibcallLowering(*Subtarget);
984
985 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
986
987 bool Changed = SafeStack(F, *TL, Libcalls, DL, &DTU, SE).run();
988
989 if (!Changed)
990 return PreservedAnalyses::all();
993 return PA;
994}
995
char SafeStackLegacyPass::ID = 0;

// Legacy pass registration.
// NOTE(review): the `INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, DEBUG_TYPE,`
// line and the intervening `INITIALIZE_PASS_DEPENDENCY(...)` lines appear to
// have been dropped from this copy — restore them from upstream; the orphaned
// argument line below belongs to INITIALIZE_PASS_BEGIN.
                  "Safe Stack instrumentation pass", false, false)
INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
                    "Safe Stack instrumentation pass", false, false)

// Factory used by the legacy codegen pipeline.
FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
VarLocInsertPt getNextNode(const DbgRecord *DVR)
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool runOnFunction(Function &F, bool PostInlining)
#define DEBUG_TYPE
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static cl::opt< bool > SafeStackUsePointerAddress("safestack-use-pointer-address", cl::init(false), cl::Hidden)
Use __safestack_pointer_address even if the platform has a faster way of access safe stack pointer.
static cl::opt< bool > ClColoring("safe-stack-coloring", cl::desc("enable safe stack coloring"), cl::Hidden, cl::init(true))
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static Value * getStackGuard(const TargetLoweringBase &TLI, const LibcallLoweringInfo &Libcalls, Module *M, IRBuilder<> &B, bool *SupportsSelectionDAGSP=nullptr)
Create a stack guard loading and populate whether SelectionDAG SSP is supported.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
an instruction to allocate memory on the stack
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static BranchProbability getBranchProbStackProtector(bool IsLikely)
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isNoInline() const
Return true if the call should not be inlined.
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
This class represents a function call, abstracting a target machine's calling convention.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:321
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition IRBuilder.h:1837
LLVM_ABI Value * CreateAllocationSize(Type *DestTy, AllocaInst *AI)
Get allocation size of an alloca as a runtime Value* (handles both static and dynamic allocas and vsc...
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition IRBuilder.h:686
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2223
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2025
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition IRBuilder.h:247
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2312
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition IRBuilder.h:1788
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2176
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1854
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1867
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2487
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition IRBuilder.h:604
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:599
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
bool isSuccess() const
Definition InlineCost.h:190
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Tracks which library functions to use for a particular subtarget.
LLVM_ABI RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Return the lowering's selection of implementation call for Call.
Record a mapping from subtarget to LibcallLoweringInfo.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1529
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
virtual Value * getIRStackGuard(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
virtual void insertSSPDeclarations(Module &M, const LibcallLoweringInfo &Libcalls) const
Inserts necessary declarations for SSP (stack protection) purpose.
virtual Value * getSafeStackPointerLocation(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const
Returns the target-specific address of the unsafe stack pointer.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
Target-Independent Code Generator Pass Configuration Options.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetLowering * getTargetLowering() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
use_iterator use_begin()
Definition Value.h:365
bool use_empty() const
Definition Value.h:347
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
Changed
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr, OptimizationRemarkEmitter *ORE=nullptr)
This function inlines the called function into the basic block of the caller.
LLVM_ABI FunctionPass * createSafeStackPass()
This pass splits the stack into a safe stack and an unsafe stack to protect against stack-based overf...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
LLVM_ABI InlineResult isInlineViable(Function &Callee)
Check if it is mechanically possible to inline the function Callee, based on the contents of the func...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:189
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
LLVM_ABI void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple dbg.value records when the alloca it describes is replaced with a new value.
Definition Local.cpp:2000
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1960
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
static constexpr Align Constant()
Allow constructions of constexpr Align.
Definition Alignment.h:88
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.