24#include "llvm/IR/IntrinsicsSPIRV.h"
32#include <unordered_set>
56 cl::desc(
"Emit OpName for all instructions"),
60#define GET_BuiltinGroup_DECL
61#include "SPIRVGenTables.inc"
66class GlobalVariableUsers {
67 template <
typename T1,
typename T2>
68 using OneToManyMapTy = DenseMap<T1, SmallPtrSet<T2, 4>>;
70 OneToManyMapTy<const GlobalVariable *, const Function *> GlobalIsUsedByFun;
72 void collectGlobalUsers(
73 const GlobalVariable *GV,
74 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
75 &GlobalIsUsedByGlobal) {
77 while (!
Stack.empty()) {
81 GlobalIsUsedByFun[GV].insert(
I->getFunction());
86 GlobalIsUsedByGlobal[GV].insert(UserGV);
91 Stack.append(
C->user_begin(),
C->user_end());
95 bool propagateGlobalToGlobalUsers(
96 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
97 &GlobalIsUsedByGlobal) {
100 for (
auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
101 OldUsersGlobals.
assign(UserGlobals.begin(), UserGlobals.end());
102 for (
const GlobalVariable *UserGV : OldUsersGlobals) {
103 auto It = GlobalIsUsedByGlobal.find(UserGV);
104 if (It == GlobalIsUsedByGlobal.end())
112 void propagateGlobalToFunctionReferences(
113 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
114 &GlobalIsUsedByGlobal) {
115 for (
auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
116 auto &UserFunctions = GlobalIsUsedByFun[GV];
117 for (
const GlobalVariable *UserGV : UserGlobals) {
118 auto It = GlobalIsUsedByFun.find(UserGV);
119 if (It == GlobalIsUsedByFun.end())
130 OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
131 GlobalIsUsedByGlobal;
132 GlobalIsUsedByFun.clear();
133 for (GlobalVariable &GV :
M.globals())
134 collectGlobalUsers(&GV, GlobalIsUsedByGlobal);
137 while (propagateGlobalToGlobalUsers(GlobalIsUsedByGlobal))
140 propagateGlobalToFunctionReferences(GlobalIsUsedByGlobal);
143 using FunctionSetType =
typename decltype(GlobalIsUsedByFun)::mapped_type;
144 const FunctionSetType &
145 getTransitiveUserFunctions(
const GlobalVariable &GV)
const {
146 auto It = GlobalIsUsedByFun.find(&GV);
147 if (It != GlobalIsUsedByFun.end())
150 static const FunctionSetType
Empty{};
155static bool isaGEP(
const Value *V) {
159class SPIRVEmitIntrinsics
161 public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
162 SPIRVTargetMachine *TM =
nullptr;
163 SPIRVGlobalRegistry *GR =
nullptr;
165 bool TrackConstants =
true;
166 bool HaveFunPtrs =
false;
167 DenseMap<Instruction *, Constant *> AggrConsts;
168 DenseMap<Instruction *, Type *> AggrConstTypes;
169 DenseSet<Instruction *> AggrStores;
170 GlobalVariableUsers GVUsers;
171 std::unordered_set<Value *> Named;
174 DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;
177 bool CanTodoType =
true;
178 unsigned TodoTypeSz = 0;
179 DenseMap<Value *, bool> TodoType;
180 void insertTodoType(
Value *
Op) {
182 if (CanTodoType && !isaGEP(
Op)) {
183 auto It = TodoType.try_emplace(
Op,
true);
188 void eraseTodoType(
Value *
Op) {
189 auto It = TodoType.find(
Op);
190 if (It != TodoType.end() && It->second) {
198 auto It = TodoType.find(
Op);
199 return It != TodoType.end() && It->second;
203 std::unordered_set<Instruction *> TypeValidated;
206 enum WellKnownTypes { Event };
209 Type *deduceElementType(
Value *
I,
bool UnknownElemTypeI8);
210 Type *deduceElementTypeHelper(
Value *
I,
bool UnknownElemTypeI8);
211 Type *deduceElementTypeHelper(
Value *
I, std::unordered_set<Value *> &Visited,
212 bool UnknownElemTypeI8,
213 bool IgnoreKnownType =
false);
214 Type *deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
215 bool UnknownElemTypeI8);
216 Type *deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
217 std::unordered_set<Value *> &Visited,
218 bool UnknownElemTypeI8);
220 std::unordered_set<Value *> &Visited,
221 bool UnknownElemTypeI8);
223 bool UnknownElemTypeI8);
226 Type *deduceNestedTypeHelper(User *U,
bool UnknownElemTypeI8);
227 Type *deduceNestedTypeHelper(User *U,
Type *Ty,
228 std::unordered_set<Value *> &Visited,
229 bool UnknownElemTypeI8);
232 void deduceOperandElementType(Instruction *
I,
233 SmallPtrSet<Instruction *, 4> *IncompleteRets,
234 const SmallPtrSet<Value *, 4> *AskOps =
nullptr,
235 bool IsPostprocessing =
false);
240 Type *reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
241 bool IsPostprocessing);
243 void replaceMemInstrUses(Instruction *Old, Instruction *New,
IRBuilder<> &
B);
245 bool insertAssignPtrTypeIntrs(Instruction *
I,
IRBuilder<> &
B,
246 bool UnknownElemTypeI8);
248 void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType,
Value *V,
250 void replacePointerOperandWithPtrCast(Instruction *
I,
Value *Pointer,
251 Type *ExpectedElementType,
252 unsigned OperandToReplace,
254 void insertPtrCastOrAssignTypeInstr(Instruction *
I,
IRBuilder<> &
B);
255 bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
257 void insertConstantsForFPFastMathDefault(
Module &M);
258 void processGlobalValue(GlobalVariable &GV,
IRBuilder<> &
B);
260 void processParamTypesByFunHeader(Function *
F,
IRBuilder<> &
B);
261 Type *deduceFunParamElementType(Function *
F,
unsigned OpIdx);
262 Type *deduceFunParamElementType(Function *
F,
unsigned OpIdx,
263 std::unordered_set<Function *> &FVisited);
265 bool deduceOperandElementTypeCalledFunction(
267 Type *&KnownElemTy,
bool &Incomplete);
268 void deduceOperandElementTypeFunctionPointer(
270 Type *&KnownElemTy,
bool IsPostprocessing);
271 bool deduceOperandElementTypeFunctionRet(
272 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
273 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
276 CallInst *buildSpvPtrcast(Function *
F,
Value *
Op,
Type *ElemTy);
277 void replaceUsesOfWithSpvPtrcast(
Value *
Op,
Type *ElemTy, Instruction *
I,
278 DenseMap<Function *, CallInst *> Ptrcasts);
280 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
283 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
284 void propagateElemTypeRec(
Value *
Op,
Type *PtrElemTy,
Type *CastElemTy,
285 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
286 std::unordered_set<Value *> &Visited,
287 DenseMap<Function *, CallInst *> Ptrcasts);
290 void replaceAllUsesWithAndErase(
IRBuilder<> &
B, Instruction *Src,
291 Instruction *Dest,
bool DeleteOld =
true);
295 GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP);
298 bool postprocessTypes(
Module &M);
299 bool processFunctionPointers(
Module &M);
300 void parseFunDeclarations(
Module &M);
301 void useRoundingMode(ConstrainedFPIntrinsic *FPI,
IRBuilder<> &
B);
303 void emitUnstructuredLoopControls(Function &
F,
IRBuilder<> &
B);
319 bool walkLogicalAccessChain(
320 GetElementPtrInst &
GEP,
321 const std::function<
void(
Type *PointedType, uint64_t Index)>
330 Type *getGEPType(GetElementPtrInst *
GEP);
337 Type *getGEPTypeLogical(GetElementPtrInst *
GEP);
339 Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP);
// Constructs the pass. TM defaults to nullptr so the legacy pass-manager
// registration machinery can default-construct it; the real target machine
// is supplied when the pass is created by the SPIR-V backend pipeline.
343 SPIRVEmitIntrinsics(SPIRVTargetMachine *TM =
nullptr)
344 : ModulePass(ID), TM(TM) {}
347 Instruction *visitGetElementPtrInst(GetElementPtrInst &
I);
350 Instruction *visitInsertElementInst(InsertElementInst &
I);
351 Instruction *visitExtractElementInst(ExtractElementInst &
I);
353 Instruction *visitExtractValueInst(ExtractValueInst &
I);
357 Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I);
// Human-readable pass name reported in pass-manager listings/diagnostics.
361 StringRef getPassName()
const override {
return "SPIRV emit intrinsics"; }
363 bool runOnModule(
Module &M)
override;
365 void getAnalysisUsage(AnalysisUsage &AU)
const override {
366 ModulePass::getAnalysisUsage(AU);
375 return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
376 II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
377 II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
380bool expectIgnoredInIRTranslation(
const Instruction *
I) {
384 switch (
II->getIntrinsicID()) {
385 case Intrinsic::invariant_start:
386 case Intrinsic::spv_resource_handlefrombinding:
387 case Intrinsic::spv_resource_getpointer:
397 if (
II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
398 Value *V =
II->getArgOperand(0);
399 return getPointerRoot(V);
407char SPIRVEmitIntrinsics::ID = 0;
430 B.SetInsertPoint(
I->getParent()->getFirstNonPHIOrDbgOrAlloca());
436 B.SetCurrentDebugLocation(
I->getDebugLoc());
437 if (
I->getType()->isVoidTy())
438 B.SetInsertPoint(
I->getNextNode());
440 B.SetInsertPoint(*
I->getInsertionPointAfterDef());
445 switch (Intr->getIntrinsicID()) {
446 case Intrinsic::invariant_start:
447 case Intrinsic::invariant_end:
455 if (
I->getType()->isTokenTy())
457 "does not support token type",
462 if (!
I->hasName() ||
I->getType()->isAggregateType() ||
463 expectIgnoredInIRTranslation(
I))
474 if (
F &&
F->getName().starts_with(
"llvm.spv.alloca"))
485 std::vector<Value *> Args = {
488 B.CreateIntrinsic(Intrinsic::spv_assign_name, {
I->getType()}, Args);
491void SPIRVEmitIntrinsics::replaceAllUsesWith(
Value *Src,
Value *Dest,
495 if (isTodoType(Src)) {
498 insertTodoType(Dest);
502void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(
IRBuilder<> &
B,
507 std::string
Name = Src->hasName() ? Src->getName().str() :
"";
508 Src->eraseFromParent();
511 if (Named.insert(Dest).second)
536Type *SPIRVEmitIntrinsics::reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
537 bool IsPostprocessing) {
552 if (UnknownElemTypeI8) {
553 if (!IsPostprocessing)
561CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *
F,
Value *
Op,
569 B.SetInsertPointPastAllocas(OpA->getParent());
572 B.SetInsertPoint(
F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
574 Type *OpTy =
Op->getType();
578 CallInst *PtrCasted =
579 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
584void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
586 DenseMap<Function *, CallInst *> Ptrcasts) {
588 CallInst *PtrCastedI =
nullptr;
589 auto It = Ptrcasts.
find(
F);
590 if (It == Ptrcasts.
end()) {
591 PtrCastedI = buildSpvPtrcast(
F,
Op, ElemTy);
592 Ptrcasts[
F] = PtrCastedI;
594 PtrCastedI = It->second;
596 I->replaceUsesOfWith(
Op, PtrCastedI);
599void SPIRVEmitIntrinsics::propagateElemType(
601 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
602 DenseMap<Function *, CallInst *> Ptrcasts;
604 for (
auto *U :
Users) {
607 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
612 if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
613 replaceUsesOfWithSpvPtrcast(
Op, ElemTy, UI, Ptrcasts);
617void SPIRVEmitIntrinsics::propagateElemTypeRec(
619 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
620 std::unordered_set<Value *> Visited;
621 DenseMap<Function *, CallInst *> Ptrcasts;
622 propagateElemTypeRec(
Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
623 std::move(Ptrcasts));
626void SPIRVEmitIntrinsics::propagateElemTypeRec(
628 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
629 std::unordered_set<Value *> &Visited,
630 DenseMap<Function *, CallInst *> Ptrcasts) {
631 if (!Visited.insert(
Op).second)
634 for (
auto *U :
Users) {
637 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
642 if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
643 replaceUsesOfWithSpvPtrcast(
Op, CastElemTy, UI, Ptrcasts);
651SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
652 bool UnknownElemTypeI8) {
653 std::unordered_set<Value *> Visited;
654 return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
658Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
659 Type *ValueTy,
Value *Operand, std::unordered_set<Value *> &Visited,
660 bool UnknownElemTypeI8) {
665 deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
676Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
677 Value *
Op, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8) {
689 for (User *OpU :
Op->users()) {
691 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
704 if ((DemangledName.
starts_with(
"__spirv_ocl_printf(") ||
713Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
Value *
I,
714 bool UnknownElemTypeI8) {
715 std::unordered_set<Value *> Visited;
716 return deduceElementTypeHelper(
I, Visited, UnknownElemTypeI8);
719void SPIRVEmitIntrinsics::maybeAssignPtrType(
Type *&Ty,
Value *
Op,
Type *RefTy,
720 bool UnknownElemTypeI8) {
722 if (!UnknownElemTypeI8)
729bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
730 GetElementPtrInst &
GEP,
731 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing,
732 const std::function<
void(
Type *,
Value *)> &OnDynamicIndexing) {
740 Value *Src = getPointerRoot(
GEP.getPointerOperand());
741 Type *CurType = deduceElementType(Src,
true);
750 OnDynamicIndexing(AT->getElementType(), Operand);
751 return AT ==
nullptr;
759 uint32_t EltTypeSize =
DL.getTypeSizeInBits(AT->getElementType()) / 8;
763 CurType = AT->getElementType();
764 OnLiteralIndexing(CurType, Index);
766 uint32_t StructSize =
DL.getTypeSizeInBits(ST) / 8;
769 const auto &STL =
DL.getStructLayout(ST);
770 unsigned Element = STL->getElementContainingOffset(
Offset);
771 Offset -= STL->getElementOffset(Element);
772 CurType =
ST->getElementType(Element);
773 OnLiteralIndexing(CurType, Element);
775 Type *EltTy = VT->getElementType();
776 TypeSize EltSizeBits =
DL.getTypeSizeInBits(EltTy);
777 assert(EltSizeBits % 8 == 0 &&
778 "Element type size in bits must be a multiple of 8.");
779 uint32_t EltTypeSize = EltSizeBits / 8;
784 OnLiteralIndexing(CurType, Index);
796SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP) {
799 B.SetInsertPoint(&
GEP);
801 std::vector<Value *> Indices;
802 Indices.push_back(ConstantInt::get(
803 IntegerType::getInt32Ty(CurrF->
getContext()), 0,
false));
804 walkLogicalAccessChain(
806 [&Indices, &
B](
Type *EltType, uint64_t Index) {
808 ConstantInt::get(
B.getInt64Ty(), Index,
false));
811 uint32_t EltTypeSize =
DL.getTypeSizeInBits(EltType) / 8;
813 Offset, ConstantInt::get(
Offset->getType(), EltTypeSize,
815 Indices.push_back(Index);
819 SmallVector<Value *, 4>
Args;
820 Args.push_back(
B.getInt1(
GEP.isInBounds()));
821 Args.push_back(
GEP.getOperand(0));
823 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
824 replaceAllUsesWithAndErase(
B, &
GEP, NewI);
828Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *
GEP) {
830 Type *CurType =
GEP->getResultElementType();
832 bool Interrupted = walkLogicalAccessChain(
833 *
GEP, [&CurType](
Type *EltType, uint64_t Index) { CurType = EltType; },
836 return Interrupted ?
GEP->getResultElementType() : CurType;
839Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *
Ref) {
840 if (
Ref->getSourceElementType() ==
841 IntegerType::getInt8Ty(CurrF->
getContext()) &&
843 return getGEPTypeLogical(
Ref);
850 Ty =
Ref->getSourceElementType();
854 Ty =
Ref->getResultElementType();
859Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
860 Value *
I, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8,
861 bool IgnoreKnownType) {
867 if (!IgnoreKnownType)
872 if (!Visited.insert(
I).second)
879 maybeAssignPtrType(Ty,
I,
Ref->getAllocatedType(), UnknownElemTypeI8);
881 Ty = getGEPType(
Ref);
883 Ty = SGEP->getResultElementType();
888 KnownTy =
Op->getType();
890 maybeAssignPtrType(Ty,
I, ElemTy, UnknownElemTypeI8);
893 Ty = SPIRV::getOriginalFunctionType(*Fn);
896 Ty = deduceElementTypeByValueDeep(
898 Ref->getNumOperands() > 0 ?
Ref->getOperand(0) :
nullptr, Visited,
902 Type *RefTy = deduceElementTypeHelper(
Ref->getPointerOperand(), Visited,
904 maybeAssignPtrType(Ty,
I, RefTy, UnknownElemTypeI8);
906 maybeAssignPtrType(Ty,
I,
Ref->getDestTy(), UnknownElemTypeI8);
908 if (
Type *Src =
Ref->getSrcTy(), *Dest =
Ref->getDestTy();
910 Ty = deduceElementTypeHelper(
Ref->getOperand(0), Visited,
915 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
919 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
921 Type *BestTy =
nullptr;
923 DenseMap<Type *, unsigned> PhiTys;
924 for (
int i =
Ref->getNumIncomingValues() - 1; i >= 0; --i) {
925 Ty = deduceElementTypeByUsersDeep(
Ref->getIncomingValue(i), Visited,
932 if (It.first->second > MaxN) {
933 MaxN = It.first->second;
941 for (
Value *
Op : {
Ref->getTrueValue(),
Ref->getFalseValue()}) {
942 Ty = deduceElementTypeByUsersDeep(
Op, Visited, UnknownElemTypeI8);
947 static StringMap<unsigned> ResTypeByArg = {
951 {
"__spirv_GenericCastToPtr_ToGlobal", 0},
952 {
"__spirv_GenericCastToPtr_ToLocal", 0},
953 {
"__spirv_GenericCastToPtr_ToPrivate", 0},
954 {
"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
955 {
"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
956 {
"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
960 if (
II &&
II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
962 if (HandleType->getTargetExtName() ==
"spirv.Image" ||
963 HandleType->getTargetExtName() ==
"spirv.SignedImage") {
964 for (User *U :
II->users()) {
969 }
else if (HandleType->getTargetExtName() ==
"spirv.VulkanBuffer") {
971 Ty = HandleType->getTypeParameter(0);
983 }
else if (
II &&
II->getIntrinsicID() ==
984 Intrinsic::spv_generic_cast_to_ptr_explicit) {
985 Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
987 }
else if (Function *CalledF = CI->getCalledFunction()) {
988 std::string DemangledName =
990 if (DemangledName.length() > 0)
991 DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
992 auto AsArgIt = ResTypeByArg.
find(DemangledName);
993 if (AsArgIt != ResTypeByArg.
end())
994 Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
995 Visited, UnknownElemTypeI8);
1002 if (Ty && !IgnoreKnownType) {
1013Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
1014 bool UnknownElemTypeI8) {
1015 std::unordered_set<Value *> Visited;
1016 return deduceNestedTypeHelper(U,
U->getType(), Visited, UnknownElemTypeI8);
1019Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
1020 User *U,
Type *OrigTy, std::unordered_set<Value *> &Visited,
1021 bool UnknownElemTypeI8) {
1030 if (!Visited.insert(U).second)
1035 bool Change =
false;
1036 for (
unsigned i = 0; i <
U->getNumOperands(); ++i) {
1038 assert(
Op &&
"Operands should not be null.");
1039 Type *OpTy =
Op->getType();
1042 if (
Type *NestedTy =
1043 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1050 Change |= Ty != OpTy;
1058 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
1059 Type *OpTy = ArrTy->getElementType();
1062 if (
Type *NestedTy =
1063 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1070 Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
1076 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
1077 Type *OpTy = VecTy->getElementType();
1080 if (
Type *NestedTy =
1081 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1088 Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
1098Type *SPIRVEmitIntrinsics::deduceElementType(
Value *
I,
bool UnknownElemTypeI8) {
1099 if (
Type *Ty = deduceElementTypeHelper(
I, UnknownElemTypeI8))
1101 if (!UnknownElemTypeI8)
1104 return IntegerType::getInt8Ty(
I->getContext());
1108 Value *PointerOperand) {
1114 return I->getType();
1122bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
1124 Type *&KnownElemTy,
bool &Incomplete) {
1128 std::string DemangledName =
1130 if (DemangledName.length() > 0 &&
1132 const SPIRVSubtarget &
ST = TM->
getSubtarget<SPIRVSubtarget>(*CalledF);
1133 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
1134 DemangledName,
ST.getPreferredInstructionSet());
1135 if (Opcode == SPIRV::OpGroupAsyncCopy) {
1136 for (
unsigned i = 0, PtrCnt = 0; i < CI->
arg_size() && PtrCnt < 2; ++i) {
1142 KnownElemTy = ElemTy;
1143 Ops.push_back(std::make_pair(
Op, i));
1145 }
else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
1152 case SPIRV::OpAtomicFAddEXT:
1153 case SPIRV::OpAtomicFMinEXT:
1154 case SPIRV::OpAtomicFMaxEXT:
1155 case SPIRV::OpAtomicLoad:
1156 case SPIRV::OpAtomicCompareExchangeWeak:
1157 case SPIRV::OpAtomicCompareExchange:
1158 case SPIRV::OpAtomicExchange:
1159 case SPIRV::OpAtomicIAdd:
1160 case SPIRV::OpAtomicISub:
1161 case SPIRV::OpAtomicOr:
1162 case SPIRV::OpAtomicXor:
1163 case SPIRV::OpAtomicAnd:
1164 case SPIRV::OpAtomicUMin:
1165 case SPIRV::OpAtomicUMax:
1166 case SPIRV::OpAtomicSMin:
1167 case SPIRV::OpAtomicSMax: {
1172 Incomplete = isTodoType(
Op);
1173 Ops.push_back(std::make_pair(
Op, 0));
1175 case SPIRV::OpAtomicStore: {
1184 Incomplete = isTodoType(
Op);
1185 Ops.push_back(std::make_pair(
Op, 0));
1194void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
1196 Type *&KnownElemTy,
bool IsPostprocessing) {
1200 Ops.push_back(std::make_pair(
Op, std::numeric_limits<unsigned>::max()));
1201 FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
1202 bool IsNewFTy =
false, IsIncomplete =
false;
1205 Type *ArgTy = Arg->getType();
1210 if (isTodoType(Arg))
1211 IsIncomplete =
true;
1213 IsIncomplete =
true;
1216 ArgTy = FTy->getFunctionParamType(ParmIdx);
1220 Type *RetTy = FTy->getReturnType();
1227 IsIncomplete =
true;
1229 IsIncomplete =
true;
1232 if (!IsPostprocessing && IsIncomplete)
1235 IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
1238bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
1239 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1240 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
1252 DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(
I,
Op)};
1253 for (User *U :
F->users()) {
1261 propagateElemType(CI, PrevElemTy, VisitedSubst);
1271 for (Instruction *IncompleteRetI : *IncompleteRets)
1272 deduceOperandElementType(IncompleteRetI,
nullptr, AskOps,
1274 }
else if (IncompleteRets) {
1277 TypeValidated.insert(
I);
1285void SPIRVEmitIntrinsics::deduceOperandElementType(
1286 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1287 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing) {
1289 Type *KnownElemTy =
nullptr;
1290 bool Incomplete =
false;
1296 Incomplete = isTodoType(
I);
1297 for (
unsigned i = 0; i <
Ref->getNumIncomingValues(); i++) {
1300 Ops.push_back(std::make_pair(
Op, i));
1306 Incomplete = isTodoType(
I);
1307 Ops.push_back(std::make_pair(
Ref->getPointerOperand(), 0));
1314 Incomplete = isTodoType(
I);
1315 Ops.push_back(std::make_pair(
Ref->getOperand(0), 0));
1319 KnownElemTy =
Ref->getSourceElementType();
1320 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1325 KnownElemTy =
Ref->getBaseType();
1326 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1329 KnownElemTy =
I->getType();
1335 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1339 reconstructType(
Ref->getValueOperand(),
false, IsPostprocessing)))
1344 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1352 Incomplete = isTodoType(
Ref->getPointerOperand());
1353 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1361 Incomplete = isTodoType(
Ref->getPointerOperand());
1362 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1368 Incomplete = isTodoType(
I);
1369 for (
unsigned i = 0; i <
Ref->getNumOperands(); i++) {
1372 Ops.push_back(std::make_pair(
Op, i));
1380 if (deduceOperandElementTypeFunctionRet(
I, IncompleteRets, AskOps,
1381 IsPostprocessing, KnownElemTy,
Op,
1384 Incomplete = isTodoType(CurrF);
1385 Ops.push_back(std::make_pair(
Op, 0));
1391 bool Incomplete0 = isTodoType(Op0);
1392 bool Incomplete1 = isTodoType(Op1);
1394 Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
1396 : GR->findDeducedElementType(Op0);
1398 KnownElemTy = ElemTy0;
1399 Incomplete = Incomplete0;
1400 Ops.push_back(std::make_pair(Op1, 1));
1401 }
else if (ElemTy1) {
1402 KnownElemTy = ElemTy1;
1403 Incomplete = Incomplete1;
1404 Ops.push_back(std::make_pair(Op0, 0));
1408 deduceOperandElementTypeCalledFunction(CI,
Ops, KnownElemTy, Incomplete);
1409 else if (HaveFunPtrs)
1410 deduceOperandElementTypeFunctionPointer(CI,
Ops, KnownElemTy,
1415 if (!KnownElemTy ||
Ops.size() == 0)
1420 for (
auto &OpIt :
Ops) {
1424 Type *AskTy =
nullptr;
1425 CallInst *AskCI =
nullptr;
1426 if (IsPostprocessing && AskOps) {
1432 if (Ty == KnownElemTy)
1435 Type *OpTy =
Op->getType();
1436 if (
Op->hasUseList() &&
1443 else if (!IsPostprocessing)
1447 if (AssignCI ==
nullptr) {
1456 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1457 std::make_pair(
I,
Op)};
1458 propagateElemTypeRec(
Op, KnownElemTy, PrevElemTy, VisitedSubst);
1462 CallInst *PtrCastI =
1463 buildSpvPtrcast(
I->getParent()->getParent(),
Op, KnownElemTy);
1464 if (OpIt.second == std::numeric_limits<unsigned>::max())
1467 I->setOperand(OpIt.second, PtrCastI);
1470 TypeValidated.insert(
I);
1473void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
1478 if (isAssignTypeInstr(U)) {
1479 B.SetInsertPoint(U);
1480 SmallVector<Value *, 2>
Args = {
New,
U->getOperand(1)};
1481 CallInst *AssignCI =
1482 B.CreateIntrinsic(Intrinsic::spv_assign_type, {
New->getType()},
Args);
1484 U->eraseFromParent();
1487 U->replaceUsesOfWith(Old, New);
1492 New->copyMetadata(*Old);
1496void SPIRVEmitIntrinsics::preprocessUndefs(
IRBuilder<> &
B) {
1497 std::queue<Instruction *> Worklist;
1501 while (!Worklist.empty()) {
1503 bool BPrepared =
false;
1506 for (
auto &
Op :
I->operands()) {
1508 if (!AggrUndef || !
Op->getType()->isAggregateType())
1515 auto *IntrUndef =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
1516 Worklist.push(IntrUndef);
1517 I->replaceUsesOfWith(
Op, IntrUndef);
1518 AggrConsts[IntrUndef] = AggrUndef;
1519 AggrConstTypes[IntrUndef] = AggrUndef->getType();
1524void SPIRVEmitIntrinsics::preprocessCompositeConstants(
IRBuilder<> &
B) {
1525 std::queue<Instruction *> Worklist;
1529 while (!Worklist.empty()) {
1530 auto *
I = Worklist.front();
1533 bool KeepInst =
false;
1534 for (
const auto &
Op :
I->operands()) {
1536 Type *ResTy =
nullptr;
1539 ResTy = COp->getType();
1551 ResTy =
Op->getType()->isVectorTy() ? COp->getType() :
B.getInt32Ty();
1556 for (
unsigned i = 0; i < COp->getNumElements(); ++i)
1557 Args.push_back(COp->getElementAsConstant(i));
1561 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
1562 :
B.SetInsertPoint(
I);
1566 B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {
Args});
1570 AggrConsts[CI] = AggrConst;
1571 AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst,
false);
1583 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
1588 unsigned RoundingModeDeco,
1595 ConstantInt::get(
Int32Ty, SPIRV::Decoration::FPRoundingMode)),
1604 MDNode *SaturatedConversionNode =
1606 Int32Ty, SPIRV::Decoration::SaturatedConversion))});
1613 if (Fu->isIntrinsic()) {
1614 unsigned const int IntrinsicId = Fu->getIntrinsicID();
1615 switch (IntrinsicId) {
1616 case Intrinsic::fptosi_sat:
1617 case Intrinsic::fptoui_sat:
1636 MDString *ConstraintString =
MDString::get(Ctx,
IA->getConstraintString());
1644 B.SetInsertPoint(&
Call);
1645 B.CreateIntrinsic(Intrinsic::spv_inline_asm, {
Args});
1650void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
1653 if (!
RM.has_value())
1655 unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
1656 switch (
RM.value()) {
1660 case RoundingMode::NearestTiesToEven:
1661 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
1663 case RoundingMode::TowardNegative:
1664 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
1666 case RoundingMode::TowardPositive:
1667 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
1669 case RoundingMode::TowardZero:
1670 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
1672 case RoundingMode::Dynamic:
1673 case RoundingMode::NearestTiesToAway:
1677 if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
1683Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &
I) {
1687 B.SetInsertPoint(&
I);
1688 SmallVector<Value *, 4>
Args;
1690 Args.push_back(
I.getCondition());
1693 for (
auto &Case :
I.cases()) {
1694 Args.push_back(Case.getCaseValue());
1695 BBCases.
push_back(Case.getCaseSuccessor());
1698 CallInst *NewI =
B.CreateIntrinsic(Intrinsic::spv_switch,
1699 {
I.getOperand(0)->getType()}, {
Args});
1703 I.eraseFromParent();
1706 B.SetInsertPoint(ParentBB);
1707 IndirectBrInst *BrI =
B.CreateIndirectBr(
1710 for (BasicBlock *BBCase : BBCases)
1716 if (
GEP->getNumIndices() == 0)
1719 return CI->getZExtValue() == 0;
1724Instruction *SPIRVEmitIntrinsics::visitIntrinsicInst(IntrinsicInst &
I) {
1730 B.SetInsertPoint(&
I);
1732 SmallVector<Value *, 4>
Args;
1733 Args.push_back(
B.getInt1(
true));
1734 Args.push_back(
I.getOperand(0));
1735 Args.push_back(
B.getInt32(0));
1736 for (
unsigned J = 0; J < SGEP->getNumIndices(); ++J)
1737 Args.push_back(SGEP->getIndexOperand(J));
1739 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, Types, Args);
1740 replaceAllUsesWithAndErase(
B, &
I, NewI);
1744Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &
I) {
1746 B.SetInsertPoint(&
I);
1754 if (
I.getSourceElementType() ==
1755 IntegerType::getInt8Ty(CurrF->
getContext())) {
1756 return buildLogicalAccessChainFromGEP(
I);
1761 Value *PtrOp =
I.getPointerOperand();
1762 Type *SrcElemTy =
I.getSourceElementType();
1763 Type *DeducedPointeeTy = deduceElementType(PtrOp,
true);
1766 if (ArrTy->getElementType() == SrcElemTy) {
1768 Type *FirstIdxType =
I.getOperand(1)->getType();
1769 NewIndices.
push_back(ConstantInt::get(FirstIdxType, 0));
1770 for (
Value *Idx :
I.indices())
1774 SmallVector<Value *, 4>
Args;
1775 Args.push_back(
B.getInt1(
I.isInBounds()));
1776 Args.push_back(
I.getPointerOperand());
1779 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1780 replaceAllUsesWithAndErase(
B, &
I, NewI);
1787 SmallVector<Value *, 4>
Args;
1788 Args.push_back(
B.getInt1(
I.isInBounds()));
1790 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1791 replaceAllUsesWithAndErase(
B, &
I, NewI);
1795Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &
I) {
1797 B.SetInsertPoint(&
I);
1806 I.eraseFromParent();
1812 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_bitcast, {
Types}, {
Args});
1813 replaceAllUsesWithAndErase(
B, &
I, NewI);
1817void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1819 Type *VTy =
V->getType();
1824 if (ElemTy != AssignedType)
1837 if (CurrentType == AssignedType)
1844 " for value " +
V->getName(),
1852void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1853 Instruction *
I,
Value *Pointer,
Type *ExpectedElementType,
1855 TypeValidated.insert(
I);
1858 Type *PointerElemTy = deduceElementTypeHelper(Pointer,
false);
1859 if (PointerElemTy == ExpectedElementType ||
1865 MetadataAsValue *VMD =
buildMD(ExpectedElementVal);
1867 bool FirstPtrCastOrAssignPtrType =
true;
1873 for (
auto User :
Pointer->users()) {
1876 (
II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1877 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1878 II->getOperand(0) != Pointer)
1883 FirstPtrCastOrAssignPtrType =
false;
1884 if (
II->getOperand(1) != VMD ||
1891 if (
II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1896 if (
II->getParent() !=
I->getParent())
1899 I->setOperand(OperandToReplace,
II);
1905 if (FirstPtrCastOrAssignPtrType) {
1910 }
else if (isTodoType(Pointer)) {
1911 eraseTodoType(Pointer);
1918 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1919 std::make_pair(
I, Pointer)};
1921 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1933 auto *PtrCastI =
B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
1939void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *
I,
1944 replacePointerOperandWithPtrCast(
1945 I,
SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->
getContext()),
1951 Type *OpTy =
Op->getType();
1954 if (OpTy ==
Op->getType())
1955 OpTy = deduceElementTypeByValueDeep(OpTy,
Op,
false);
1956 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 1,
B);
1961 Type *OpTy = LI->getType();
1966 Type *NewOpTy = OpTy;
1967 OpTy = deduceElementTypeByValueDeep(OpTy, LI,
false);
1968 if (OpTy == NewOpTy)
1969 insertTodoType(Pointer);
1972 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
1977 Type *OpTy =
nullptr;
1989 OpTy = GEPI->getSourceElementType();
1991 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
1993 insertTodoType(Pointer);
2005 std::string DemangledName =
2009 bool HaveTypes =
false;
2027 for (User *U : CalledArg->
users()) {
2029 if ((ElemTy = deduceElementTypeHelper(Inst,
false)) !=
nullptr)
2035 HaveTypes |= ElemTy !=
nullptr;
2040 if (DemangledName.empty() && !HaveTypes)
2058 Type *ExpectedType =
2060 if (!ExpectedType && !DemangledName.empty())
2061 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
2062 DemangledName,
OpIdx,
I->getContext());
2063 if (!ExpectedType || ExpectedType->
isVoidTy())
2071 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType,
OpIdx,
B);
2075Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &
I) {
2082 I.getOperand(1)->getType(),
2083 I.getOperand(2)->getType()};
2085 B.SetInsertPoint(&
I);
2087 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_insertelt, {
Types}, {
Args});
2088 replaceAllUsesWithAndErase(
B, &
I, NewI);
2093SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &
I) {
2100 B.SetInsertPoint(&
I);
2102 I.getIndexOperand()->getType()};
2103 SmallVector<Value *, 2>
Args = {
I.getVectorOperand(),
I.getIndexOperand()};
2104 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_extractelt, {
Types}, {
Args});
2105 replaceAllUsesWithAndErase(
B, &
I, NewI);
2109Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &
I) {
2111 B.SetInsertPoint(&
I);
2114 Value *AggregateOp =
I.getAggregateOperand();
2118 Args.push_back(AggregateOp);
2119 Args.push_back(
I.getInsertedValueOperand());
2120 for (
auto &
Op :
I.indices())
2121 Args.push_back(
B.getInt32(
Op));
2123 B.CreateIntrinsic(Intrinsic::spv_insertv, {
Types}, {
Args});
2124 replaceMemInstrUses(&
I, NewI,
B);
2128Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &
I) {
2129 if (
I.getAggregateOperand()->getType()->isAggregateType())
2132 B.SetInsertPoint(&
I);
2134 for (
auto &
Op :
I.indices())
2135 Args.push_back(
B.getInt32(
Op));
2137 B.CreateIntrinsic(Intrinsic::spv_extractv, {
I.getType()}, {
Args});
2138 replaceAllUsesWithAndErase(
B, &
I, NewI);
2142Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &
I) {
2143 if (!
I.getType()->isAggregateType())
2146 B.SetInsertPoint(&
I);
2147 TrackConstants =
false;
2152 B.CreateIntrinsic(Intrinsic::spv_load, {
I.getOperand(0)->getType()},
2153 {
I.getPointerOperand(),
B.getInt16(Flags),
2154 B.getInt32(
I.getAlign().value())});
2155 replaceMemInstrUses(&
I, NewI,
B);
2159Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &
I) {
2163 B.SetInsertPoint(&
I);
2164 TrackConstants =
false;
2168 auto *PtrOp =
I.getPointerOperand();
2170 if (
I.getValueOperand()->getType()->isAggregateType()) {
2178 "Unexpected argument of aggregate type, should be spv_extractv!");
2182 auto *NewI =
B.CreateIntrinsic(
2183 Intrinsic::spv_store, {
I.getValueOperand()->getType(), PtrOp->
getType()},
2184 {
I.getValueOperand(), PtrOp,
B.getInt16(Flags),
2185 B.getInt32(
I.getAlign().value())});
2187 I.eraseFromParent();
2191Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &
I) {
2192 Value *ArraySize =
nullptr;
2193 if (
I.isArrayAllocation()) {
2196 SPIRV::Extension::SPV_INTEL_variable_length_array))
2198 "array allocation: this instruction requires the following "
2199 "SPIR-V extension: SPV_INTEL_variable_length_array",
2201 ArraySize =
I.getArraySize();
2204 B.SetInsertPoint(&
I);
2205 TrackConstants =
false;
2206 Type *PtrTy =
I.getType();
2209 ?
B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2210 {PtrTy, ArraySize->
getType()},
2211 {ArraySize,
B.getInt32(
I.getAlign().value())})
2212 :
B.CreateIntrinsic(
Intrinsic::spv_alloca, {PtrTy},
2213 {
B.getInt32(
I.getAlign().value())});
2214 replaceAllUsesWithAndErase(
B, &
I, NewI);
2218Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I) {
2219 assert(
I.getType()->isAggregateType() &&
"Aggregate result is expected");
2221 B.SetInsertPoint(&
I);
2223 Args.push_back(
B.getInt32(
2224 static_cast<uint32_t
>(
getMemScope(
I.getContext(),
I.getSyncScopeID()))));
2225 Args.push_back(
B.getInt32(
2227 Args.push_back(
B.getInt32(
2229 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2230 {
I.getPointerOperand()->getType()}, {
Args});
2231 replaceMemInstrUses(&
I, NewI,
B);
2235Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &
I) {
2237 B.SetInsertPoint(&
I);
2238 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2247 static const StringSet<> ArtificialGlobals{
"llvm.global.annotations",
2248 "llvm.compiler.used",
"llvm.used"};
2253 auto &UserFunctions = GVUsers.getTransitiveUserFunctions(GV);
2254 if (UserFunctions.contains(
F))
2259 if (!UserFunctions.empty())
2264 const Module &M = *
F->getParent();
2265 const Function &FirstDefinition = *M.getFunctionDefs().
begin();
2266 return F == &FirstDefinition;
2269void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2280 deduceElementTypeHelper(&GV,
false);
2284 auto *InitInst =
B.CreateIntrinsic(Intrinsic::spv_init_global,
2286 InitInst->setArgOperand(1, Init);
2289 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.
getType(), &GV);
2295bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *
I,
2297 bool UnknownElemTypeI8) {
2303 if (
Type *ElemTy = deduceElementType(
I, UnknownElemTypeI8)) {
2310void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *
I,
2313 static StringMap<unsigned> ResTypeWellKnown = {
2314 {
"async_work_group_copy", WellKnownTypes::Event},
2315 {
"async_work_group_strided_copy", WellKnownTypes::Event},
2316 {
"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2320 bool IsKnown =
false;
2325 std::string DemangledName =
2328 if (DemangledName.length() > 0)
2330 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2331 auto ResIt = ResTypeWellKnown.
find(DemangledName);
2332 if (ResIt != ResTypeWellKnown.
end()) {
2335 switch (ResIt->second) {
2336 case WellKnownTypes::Event:
2343 switch (DecorationId) {
2346 case FPDecorationId::SAT:
2349 case FPDecorationId::RTE:
2351 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE,
B);
2353 case FPDecorationId::RTZ:
2355 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ,
B);
2357 case FPDecorationId::RTP:
2359 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP,
B);
2361 case FPDecorationId::RTN:
2363 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN,
B);
2369 Type *Ty =
I->getType();
2372 Type *TypeToAssign = Ty;
2374 if (
II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2375 II->getIntrinsicID() == Intrinsic::spv_undef) {
2376 auto It = AggrConstTypes.
find(
II);
2377 if (It == AggrConstTypes.
end())
2379 TypeToAssign = It->second;
2385 for (
const auto &
Op :
I->operands()) {
2392 Type *OpTy =
Op->getType();
2394 CallInst *AssignCI =
2399 Type *OpTy =
Op->getType();
2414 CallInst *AssignCI =
2424bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2425 Instruction *Inst) {
2427 if (!STI->
canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2438 case Intrinsic::spv_load:
2439 case Intrinsic::spv_store:
2446 const std::string
Prefix =
"__spirv_Atomic";
2447 const bool IsAtomic =
Name.find(Prefix) == 0;
2455void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *
I,
2457 if (MDNode *MD =
I->getMetadata(
"spirv.Decorations")) {
2459 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
2464 auto processMemAliasingDecoration = [&](
unsigned Kind) {
2465 if (MDNode *AliasListMD =
I->getMetadata(Kind)) {
2466 if (shouldTryToAddMemAliasingDecoration(
I)) {
2467 uint32_t Dec =
Kind == LLVMContext::MD_alias_scope
2468 ? SPIRV::Decoration::AliasScopeINTEL
2469 : SPIRV::Decoration::NoAliasINTEL;
2471 I, ConstantInt::get(
B.getInt32Ty(), Dec),
2474 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2475 {
I->getType()}, {
Args});
2479 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2480 processMemAliasingDecoration(LLVMContext::MD_noalias);
2483 if (MDNode *MD =
I->getMetadata(LLVMContext::MD_fpmath)) {
2485 bool AllowFPMaxError =
2487 if (!AllowFPMaxError)
2491 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2500 &FPFastMathDefaultInfoMap,
2502 auto it = FPFastMathDefaultInfoMap.
find(
F);
2503 if (it != FPFastMathDefaultInfoMap.
end())
2511 SPIRV::FPFastMathMode::None);
2513 SPIRV::FPFastMathMode::None);
2515 SPIRV::FPFastMathMode::None);
2516 return FPFastMathDefaultInfoMap[
F] = std::move(FPFastMathDefaultInfoVec);
2522 size_t BitWidth = Ty->getScalarSizeInBits();
2526 assert(Index >= 0 && Index < 3 &&
2527 "Expected FPFastMathDefaultInfo for half, float, or double");
2528 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2529 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2530 return FPFastMathDefaultInfoVec[Index];
2533void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(
Module &M) {
2535 if (!
ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2544 auto Node =
M.getNamedMetadata(
"spirv.ExecutionMode");
2546 if (!
M.getNamedMetadata(
"opencl.enable.FP_CONTRACT")) {
2554 ConstantInt::get(Type::getInt32Ty(
M.getContext()), 0);
2557 [[maybe_unused]] GlobalVariable *GV =
2558 new GlobalVariable(M,
2559 Type::getInt32Ty(
M.getContext()),
2573 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2574 FPFastMathDefaultInfoMap;
2576 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2585 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2587 "Expected 4 operands for FPFastMathDefault");
2593 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2595 SPIRV::FPFastMathDefaultInfo &
Info =
2598 Info.FPFastMathDefault =
true;
2599 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2601 "Expected no operands for ContractionOff");
2605 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2607 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2608 Info.ContractionOff =
true;
2610 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2612 "Expected 1 operand for SignedZeroInfNanPreserve");
2613 unsigned TargetWidth =
2618 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2622 assert(Index >= 0 && Index < 3 &&
2623 "Expected FPFastMathDefaultInfo for half, float, or double");
2624 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2625 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2626 FPFastMathDefaultInfoVec[
Index].SignedZeroInfNanPreserve =
true;
2630 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2631 for (
auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2632 if (FPFastMathDefaultInfoVec.
empty())
2635 for (
const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2636 assert(
Info.Ty &&
"Expected target type for FPFastMathDefaultInfo");
2639 if (Flags == SPIRV::FPFastMathMode::None && !
Info.ContractionOff &&
2640 !
Info.SignedZeroInfNanPreserve && !
Info.FPFastMathDefault)
2644 if (
Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2646 "and AllowContract");
2648 if (
Info.SignedZeroInfNanPreserve &&
2650 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2651 SPIRV::FPFastMathMode::NSZ))) {
2652 if (
Info.FPFastMathDefault)
2654 "SignedZeroInfNanPreserve but at least one of "
2655 "NotNaN/NotInf/NSZ is enabled.");
2658 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2659 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2660 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2662 "AllowTransform requires AllowReassoc and "
2663 "AllowContract to be set.");
2666 auto it = GlobalVars.find(Flags);
2667 GlobalVariable *GV =
nullptr;
2668 if (it != GlobalVars.end()) {
2674 ConstantInt::get(Type::getInt32Ty(
M.getContext()), Flags);
2677 GV =
new GlobalVariable(M,
2678 Type::getInt32Ty(
M.getContext()),
2683 GlobalVars[
Flags] = GV;
2689void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *
I,
2692 bool IsConstComposite =
2693 II &&
II->getIntrinsicID() == Intrinsic::spv_const_composite;
2694 if (IsConstComposite && TrackConstants) {
2696 auto t = AggrConsts.
find(
I);
2700 {
II->getType(),
II->getType()}, t->second,
I, {},
B);
2702 NewOp->setArgOperand(0,
I);
2705 for (
const auto &
Op :
I->operands()) {
2709 unsigned OpNo =
Op.getOperandNo();
2710 if (
II && ((
II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2711 (
II->paramHasAttr(OpNo, Attribute::ImmArg))))
2715 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
2716 :
B.SetInsertPoint(
I);
2719 Type *OpTy =
Op->getType();
2727 {OpTy, OpTyVal->
getType()},
Op, OpTyVal, {},
B);
2729 if (!IsConstComposite &&
isPointerTy(OpTy) && OpElemTy !=
nullptr &&
2730 OpElemTy != IntegerType::getInt8Ty(
I->getContext())) {
2732 SmallVector<Value *, 2>
Args = {
2735 CallInst *PtrCasted =
2736 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
2741 I->setOperand(OpNo, NewOp);
2743 if (Named.insert(
I).second)
2747Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *
F,
2749 std::unordered_set<Function *> FVisited;
2750 return deduceFunParamElementType(
F,
OpIdx, FVisited);
2753Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2754 Function *
F,
unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2756 if (!FVisited.insert(
F).second)
2759 std::unordered_set<Value *> Visited;
2762 for (User *U :
F->users()) {
2774 if (
Type *Ty = deduceElementTypeHelper(OpArg, Visited,
false))
2777 for (User *OpU : OpArg->
users()) {
2779 if (!Inst || Inst == CI)
2782 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited,
false))
2789 if (FVisited.find(OuterF) != FVisited.end())
2791 for (
unsigned i = 0; i < OuterF->
arg_size(); ++i) {
2792 if (OuterF->
getArg(i) == OpArg) {
2793 Lookup.push_back(std::make_pair(OuterF, i));
2800 for (
auto &Pair :
Lookup) {
2801 if (
Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2808void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *
F,
2810 B.SetInsertPointPastAllocas(
F);
2824 for (User *U :
F->users()) {
2840 for (User *U : Arg->
users()) {
2844 CI->
getParent()->getParent() == CurrF) {
2846 deduceOperandElementTypeFunctionPointer(CI,
Ops, ElemTy,
false);
2857void SPIRVEmitIntrinsics::processParamTypes(Function *
F,
IRBuilder<> &
B) {
2858 B.SetInsertPointPastAllocas(
F);
2864 if (!ElemTy && (ElemTy = deduceFunParamElementType(
F,
OpIdx)) !=
nullptr) {
2866 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2868 propagateElemType(Arg, IntegerType::getInt8Ty(
F->getContext()),
2880 bool IsNewFTy =
false;
2896bool SPIRVEmitIntrinsics::processFunctionPointers(
Module &M) {
2899 if (
F.isIntrinsic())
2901 if (
F.isDeclaration()) {
2902 for (User *U :
F.users()) {
2915 for (User *U :
F.users()) {
2917 if (!
II ||
II->arg_size() != 3 ||
II->getOperand(0) != &
F)
2919 if (
II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2920 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2927 if (Worklist.
empty())
2930 LLVMContext &Ctx =
M.getContext();
2935 for (Function *
F : Worklist) {
2937 for (
const auto &Arg :
F->args())
2939 IRB.CreateCall(
F, Args);
2941 IRB.CreateRetVoid();
2947void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(
IRBuilder<> &
B) {
2948 DenseMap<Function *, CallInst *> Ptrcasts;
2949 for (
auto It : FDeclPtrTys) {
2951 for (
auto *U :
F->users()) {
2956 for (
auto [Idx, ElemTy] : It.second) {
2964 B.SetInsertPointPastAllocas(Arg->
getParent());
2968 }
else if (isaGEP(Param)) {
2969 replaceUsesOfWithSpvPtrcast(Param,
normalizeType(ElemTy), CI,
2978 .getFirstNonPHIOrDbgOrAlloca());
2999SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP) {
3006 Type *SrcTy =
GEP->getSourceElementType();
3007 SmallVector<Value *, 8> Indices(
GEP->indices());
3009 if (ArrTy && ArrTy->getNumElements() == 0 &&
3011 Indices.erase(Indices.begin());
3012 SrcTy = ArrTy->getElementType();
3014 GEP->getNoWrapFlags(),
"",
3015 GEP->getIterator());
3020void SPIRVEmitIntrinsics::emitUnstructuredLoopControls(Function &
F,
3026 if (!
ST->canUseExtension(
3027 SPIRV::Extension::SPV_INTEL_unstructured_loop_controls))
3030 for (BasicBlock &BB :
F) {
3032 MDNode *LoopMD =
Term->getMetadata(LLVMContext::MD_loop);
3038 unsigned LC =
Ops[0];
3039 if (LC == SPIRV::LoopControl::None)
3043 B.SetInsertPoint(Term);
3044 SmallVector<Value *, 4> IntrArgs;
3046 for (
unsigned I = 1;
I <
Ops.size(); ++
I)
3048 B.CreateIntrinsic(Intrinsic::spv_loop_control_intel, IntrArgs);
3052bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
3053 if (
Func.isDeclaration())
3057 GR =
ST.getSPIRVGlobalRegistry();
3061 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
3066 AggrConstTypes.
clear();
3071 SmallPtrSet<Instruction *, 4> DeadInsts;
3076 if ((!
GEP && !SGEP) || GR->findDeducedElementType(&
I))
3080 GR->addDeducedElementType(SGEP,
3085 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(
GEP);
3087 GEP->replaceAllUsesWith(NewGEP);
3091 if (
Type *GepTy = getGEPType(
GEP))
3095 for (
auto *
I : DeadInsts) {
3096 assert(
I->use_empty() &&
"Dead instruction should not have any uses left");
3097 I->eraseFromParent();
3100 processParamTypesByFunHeader(CurrF,
B);
3109 Type *ElTy =
SI->getValueOperand()->getType();
3114 B.SetInsertPoint(&
Func.getEntryBlock(),
Func.getEntryBlock().begin());
3115 for (
auto &GV :
Func.getParent()->globals())
3116 processGlobalValue(GV,
B);
3118 preprocessUndefs(
B);
3119 preprocessCompositeConstants(
B);
3123 applyDemangledPtrArgTypes(
B);
3126 for (
auto &
I : Worklist) {
3128 if (isConvergenceIntrinsic(
I))
3131 bool Postpone = insertAssignPtrTypeIntrs(
I,
B,
false);
3133 insertAssignTypeIntrs(
I,
B);
3134 insertPtrCastOrAssignTypeInstr(
I,
B);
3138 if (Postpone && !GR->findAssignPtrTypeInstr(
I))
3139 insertAssignPtrTypeIntrs(
I,
B,
true);
3142 useRoundingMode(FPI,
B);
3147 SmallPtrSet<Instruction *, 4> IncompleteRets;
3149 deduceOperandElementType(&
I, &IncompleteRets);
3153 for (BasicBlock &BB : Func)
3154 for (PHINode &Phi : BB.
phis())
3156 deduceOperandElementType(&Phi,
nullptr);
3158 for (
auto *
I : Worklist) {
3159 TrackConstants =
true;
3169 if (isConvergenceIntrinsic(
I))
3173 processInstrAfterVisit(
I,
B);
3176 emitUnstructuredLoopControls(Func,
B);
3182bool SPIRVEmitIntrinsics::postprocessTypes(
Module &M) {
3183 if (!GR || TodoTypeSz == 0)
3186 unsigned SzTodo = TodoTypeSz;
3187 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
3192 CallInst *AssignCI = GR->findAssignPtrTypeInstr(
Op);
3193 Type *KnownTy = GR->findDeducedElementType(
Op);
3194 if (!KnownTy || !AssignCI)
3200 std::unordered_set<Value *> Visited;
3201 if (
Type *ElemTy = deduceElementTypeHelper(
Op, Visited,
false,
true)) {
3202 if (ElemTy != KnownTy) {
3203 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3204 propagateElemType(CI, ElemTy, VisitedSubst);
3211 if (
Op->hasUseList()) {
3212 for (User *U :
Op->users()) {
3219 if (TodoTypeSz == 0)
3224 SmallPtrSet<Instruction *, 4> IncompleteRets;
3226 auto It = ToProcess.
find(&
I);
3227 if (It == ToProcess.
end())
3229 It->second.remove_if([
this](
Value *V) {
return !isTodoType(V); });
3230 if (It->second.size() == 0)
3232 deduceOperandElementType(&
I, &IncompleteRets, &It->second,
true);
3233 if (TodoTypeSz == 0)
3238 return SzTodo > TodoTypeSz;
3242void SPIRVEmitIntrinsics::parseFunDeclarations(
Module &M) {
3244 if (!
F.isDeclaration() ||
F.isIntrinsic())
3248 if (DemangledName.empty())
3252 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3253 DemangledName,
ST.getPreferredInstructionSet());
3254 if (Opcode != SPIRV::OpGroupAsyncCopy)
3257 SmallVector<unsigned> Idxs;
3266 LLVMContext &Ctx =
F.getContext();
3268 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3269 if (!TypeStrs.
size())
3272 for (
unsigned Idx : Idxs) {
3273 if (Idx >= TypeStrs.
size())
3276 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3279 FDeclPtrTys[&
F].push_back(std::make_pair(Idx, ElemTy));
3284bool SPIRVEmitIntrinsics::runOnModule(
Module &M) {
3287 parseFunDeclarations(M);
3288 insertConstantsForFPFastMathDefault(M);
3299 if (!
F.isDeclaration() && !
F.isIntrinsic()) {
3301 processParamTypes(&
F,
B);
3305 CanTodoType =
false;
3306 Changed |= postprocessTypes(M);
3309 Changed |= processFunctionPointers(M);
3315 return new SPIRVEmitIntrinsics(TM);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static Type * getPointeeType(Value *Ptr, const DataLayout &DL)
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static cl::opt< bool > SpirvEmitOpNames("spirv-emit-op-names", cl::desc("Emit OpName for all instructions"), cl::init(false))
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static bool isFirstIndexZero(const GetElementPtrInst *GEP)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static bool shouldEmitIntrinsicsForGlobalValue(const GlobalVariableUsers &GVUsers, const GlobalVariable &GV, const Function *F)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
StringSet - A set-like wrapper for the StringMap.
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
const Function * getParent() const
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const Function * getParent() const
Return the enclosing method, or null if none.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Type * getReturnType() const
Returns the type of the ret val.
Argument * getArg(unsigned i) const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ InternalLinkage
Rename collisions when linking (static functions).
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
static unsigned getPointerOperandIndex()
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Flags
Flags values. These may be or'd together.
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
A Module instance is used to store all the information related to an LLVM module.
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator find(StringRef Key)
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringSet - A wrapper for StringMap that provides set-like functionality.
bool contains(StringRef key) const
Check if the set contains the given key.
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
static unsigned getPointerOperandIndex()
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isArrayTy() const
True if this is an instance of ArrayType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
Type * getArrayElementType() const
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isStructTy() const
True if this is an instance of StructType.
bool isTargetExtTy() const
Return true if this is a target extension type.
bool isAggregateType() const
Return true if the type is an aggregate type.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
void setOperand(unsigned i, Value *Val)
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
iterator_range< user_iterator > users()
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
NodeAddr< NodeBase * > Node
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool isNestedPointer(const Type *Ty)
Function * getOrCreateBackendServiceFunction(Module &M)
MetadataAsValue * buildMD(Value *Arg)
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
SmallVector< unsigned, 1 > getSpirvLoopControlOperandsFromLoopMetadata(MDNode *LoopMD)
auto reverse(ContainerTy &&C)
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
bool isPointerTy(const Type *T)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
bool hasPointeeTypeAttr(Argument *Arg)
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool hasInitializer(const GlobalVariable *GV)
Type * normalizeType(Type *Ty)
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
PoisonValue * getNormalizedPoisonValue(Type *Ty)
bool isUntypedPointerTy(const Type *T)
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)