32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
48 std::optional<Register> Bias;
49 std::optional<Register>
Offset;
50 std::optional<Register> MinLod;
51 std::optional<Register> GradX;
52 std::optional<Register> GradY;
53 std::optional<Register> Lod;
54 std::optional<Register> Compare;
57llvm::SPIRV::SelectionControl::SelectionControl
58getSelectionOperandForImm(
int Imm) {
60 return SPIRV::SelectionControl::Flatten;
62 return SPIRV::SelectionControl::DontFlatten;
64 return SPIRV::SelectionControl::None;
68#define GET_GLOBALISEL_PREDICATE_BITSET
69#include "SPIRVGenGlobalISel.inc"
70#undef GET_GLOBALISEL_PREDICATE_BITSET
97#define GET_GLOBALISEL_PREDICATES_DECL
98#include "SPIRVGenGlobalISel.inc"
99#undef GET_GLOBALISEL_PREDICATES_DECL
101#define GET_GLOBALISEL_TEMPORARIES_DECL
102#include "SPIRVGenGlobalISel.inc"
103#undef GET_GLOBALISEL_TEMPORARIES_DECL
127 unsigned BitSetOpcode)
const;
131 unsigned BitSetOpcode)
const;
135 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
139 unsigned BitSetOpcode,
140 bool SwapPrimarySide)
const;
147 unsigned Opcode)
const;
150 unsigned Opcode)
const;
169 unsigned NewOpcode,
unsigned NegateOpcode = 0)
const;
180 unsigned OpType)
const;
229 template <
bool Signed>
232 template <
bool Signed>
239 template <
typename PickOpcodeFn>
242 PickOpcodeFn &&PickOpcode)
const;
256 template <
typename PickOpcodeFn>
259 PickOpcodeFn &&PickOpcode)
const;
274 bool IsSigned)
const;
276 bool IsSigned,
unsigned Opcode)
const;
278 bool IsSigned)
const;
284 bool IsSigned)
const;
317 GL::GLSLExtInst GLInst,
bool setMIFlags =
true,
318 bool useMISrc =
true,
320 bool selectExtInst(
Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
321 CL::OpenCLExtInst CLInst,
bool setMIFlags =
true,
322 bool useMISrc =
true,
324 bool selectExtInst(
Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
325 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst,
326 bool setMIFlags =
true,
bool useMISrc =
true,
328 bool selectExtInst(
Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
329 const ExtInstList &ExtInsts,
bool setMIFlags =
true,
330 bool useMISrc =
true,
333 bool selectLog10(
Register ResVReg, SPIRVTypeInst ResType,
334 MachineInstr &
I)
const;
336 bool selectSaturate(
Register ResVReg, SPIRVTypeInst ResType,
337 MachineInstr &
I)
const;
339 bool selectWaveOpInst(
Register ResVReg, SPIRVTypeInst ResType,
340 MachineInstr &
I,
unsigned Opcode)
const;
342 bool selectWaveActiveCountBits(
Register ResVReg, SPIRVTypeInst ResType,
343 MachineInstr &
I)
const;
345 bool selectWaveActiveAllEqual(
Register ResVReg, SPIRVTypeInst ResType,
346 MachineInstr &
I)
const;
350 bool selectHandleFromBinding(
Register &ResVReg, SPIRVTypeInst ResType,
351 MachineInstr &
I)
const;
353 bool selectCounterHandleFromBinding(
Register &ResVReg, SPIRVTypeInst ResType,
354 MachineInstr &
I)
const;
356 bool selectReadImageIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
357 MachineInstr &
I)
const;
358 bool selectSampleBasicIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
359 MachineInstr &
I)
const;
360 bool selectSampleBiasIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
361 MachineInstr &
I)
const;
362 bool selectSampleGradIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
363 MachineInstr &
I)
const;
364 bool selectSampleLevelIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
365 MachineInstr &
I)
const;
366 bool selectSampleCmpIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
367 MachineInstr &
I)
const;
368 bool selectSampleCmpLevelZeroIntrinsic(
Register &ResVReg,
369 SPIRVTypeInst ResType,
370 MachineInstr &
I)
const;
371 bool selectGatherIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
372 MachineInstr &
I)
const;
373 bool selectImageWriteIntrinsic(MachineInstr &
I)
const;
374 bool selectResourceGetPointer(
Register &ResVReg, SPIRVTypeInst ResType,
375 MachineInstr &
I)
const;
376 bool selectPushConstantGetPointer(
Register &ResVReg, SPIRVTypeInst ResType,
377 MachineInstr &
I)
const;
378 bool selectResourceNonUniformIndex(
Register &ResVReg, SPIRVTypeInst ResType,
379 MachineInstr &
I)
const;
380 bool selectModf(
Register ResVReg, SPIRVTypeInst ResType,
381 MachineInstr &
I)
const;
382 bool selectUpdateCounter(
Register &ResVReg, SPIRVTypeInst ResType,
383 MachineInstr &
I)
const;
384 bool selectFrexp(
Register ResVReg, SPIRVTypeInst ResType,
385 MachineInstr &
I)
const;
386 bool selectSincos(
Register ResVReg, SPIRVTypeInst ResType,
387 MachineInstr &
I)
const;
388 bool selectExp10(
Register ResVReg, SPIRVTypeInst ResType,
389 MachineInstr &
I)
const;
390 bool selectDerivativeInst(
Register ResVReg, SPIRVTypeInst ResType,
391 MachineInstr &
I,
const unsigned DPdOpCode)
const;
393 Register buildI32Constant(uint32_t Val, MachineInstr &
I,
394 SPIRVTypeInst ResType =
nullptr)
const;
396 Register buildZerosVal(SPIRVTypeInst ResType, MachineInstr &
I)
const;
397 bool isScalarOrVectorIntConstantZero(
Register Reg)
const;
398 Register buildZerosValF(SPIRVTypeInst ResType, MachineInstr &
I)
const;
400 MachineInstr &
I)
const;
401 Register buildOnesValF(SPIRVTypeInst ResType, MachineInstr &
I)
const;
403 bool wrapIntoSpecConstantOp(MachineInstr &
I,
406 Register getUcharPtrTypeReg(MachineInstr &
I,
407 SPIRV::StorageClass::StorageClass SC)
const;
408 MachineInstrBuilder buildSpecConstantOp(MachineInstr &
I,
Register Dest,
410 uint32_t Opcode)
const;
411 MachineInstrBuilder buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
412 SPIRVTypeInst SrcPtrTy)
const;
413 Register buildPointerToResource(SPIRVTypeInst ResType,
414 SPIRV::StorageClass::StorageClass SC,
415 uint32_t Set, uint32_t
Binding,
416 uint32_t ArraySize,
Register IndexReg,
418 MachineIRBuilder MIRBuilder)
const;
419 SPIRVTypeInst widenTypeToVec4(SPIRVTypeInst
Type, MachineInstr &
I)
const;
420 bool extractSubvector(
Register &ResVReg, SPIRVTypeInst ResType,
421 Register &ReadReg, MachineInstr &InsertionPoint)
const;
422 bool generateImageReadOrFetch(
Register &ResVReg, SPIRVTypeInst ResType,
424 DebugLoc Loc, MachineInstr &Pos)
const;
425 bool generateSampleImage(
Register ResVReg, SPIRVTypeInst ResType,
427 Register CoordinateReg,
const ImageOperands &ImOps,
430 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
431 Register ResVReg, SPIRVTypeInst ResType,
432 MachineInstr &
I)
const;
433 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
434 Register ResVReg, SPIRVTypeInst ResType,
435 MachineInstr &
I)
const;
436 bool loadHandleBeforePosition(
Register &HandleReg, SPIRVTypeInst ResType,
437 GIntrinsic &HandleDef, MachineInstr &Pos)
const;
438 void decorateUsesAsNonUniform(
Register &NonUniformReg)
const;
439 void errorIfInstrOutsideShader(MachineInstr &
I)
const;
442bool sampledTypeIsSignedInteger(
const llvm::Type *HandleType) {
444 if (
TET->getTargetExtName() ==
"spirv.Image") {
447 assert(
TET->getTargetExtName() ==
"spirv.SignedImage");
448 return TET->getTypeParameter(0)->isIntegerTy();
452#define GET_GLOBALISEL_IMPL
453#include "SPIRVGenGlobalISel.inc"
454#undef GET_GLOBALISEL_IMPL
460 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
463#include
"SPIRVGenGlobalISel.inc"
466#include
"SPIRVGenGlobalISel.inc"
478 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
482void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
483 if (HasVRegsReset == &MF)
498 for (
const auto &
MBB : MF) {
499 for (
const auto &
MI :
MBB) {
502 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
506 LLT DstType = MRI.
getType(DstReg);
508 LLT SrcType = MRI.
getType(SrcReg);
509 if (DstType != SrcType)
514 if (DstRC != SrcRC && SrcRC)
526 while (!Stack.empty()) {
531 switch (
MI->getOpcode()) {
532 case TargetOpcode::G_INTRINSIC:
533 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
534 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
536 Intrinsic::spv_const_composite)
539 case TargetOpcode::G_BUILD_VECTOR:
540 case TargetOpcode::G_SPLAT_VECTOR:
542 i < OpDef->getNumOperands(); i++) {
547 Stack.push_back(OpNestedDef);
550 case TargetOpcode::G_CONSTANT:
551 case TargetOpcode::G_FCONSTANT:
552 case TargetOpcode::G_IMPLICIT_DEF:
553 case SPIRV::OpConstantTrue:
554 case SPIRV::OpConstantFalse:
555 case SPIRV::OpConstantI:
556 case SPIRV::OpConstantF:
557 case SPIRV::OpConstantComposite:
558 case SPIRV::OpConstantCompositeContinuedINTEL:
559 case SPIRV::OpConstantSampler:
560 case SPIRV::OpConstantNull:
562 case SPIRV::OpConstantFunctionPointerINTEL:
589 case Intrinsic::spv_all:
590 case Intrinsic::spv_alloca:
591 case Intrinsic::spv_any:
592 case Intrinsic::spv_bitcast:
593 case Intrinsic::spv_const_composite:
594 case Intrinsic::spv_cross:
595 case Intrinsic::spv_degrees:
596 case Intrinsic::spv_distance:
597 case Intrinsic::spv_extractelt:
598 case Intrinsic::spv_extractv:
599 case Intrinsic::spv_faceforward:
600 case Intrinsic::spv_fdot:
601 case Intrinsic::spv_firstbitlow:
602 case Intrinsic::spv_firstbitshigh:
603 case Intrinsic::spv_firstbituhigh:
604 case Intrinsic::spv_frac:
605 case Intrinsic::spv_gep:
606 case Intrinsic::spv_global_offset:
607 case Intrinsic::spv_global_size:
608 case Intrinsic::spv_group_id:
609 case Intrinsic::spv_insertelt:
610 case Intrinsic::spv_insertv:
611 case Intrinsic::spv_isinf:
612 case Intrinsic::spv_isnan:
613 case Intrinsic::spv_lerp:
614 case Intrinsic::spv_length:
615 case Intrinsic::spv_normalize:
616 case Intrinsic::spv_num_subgroups:
617 case Intrinsic::spv_num_workgroups:
618 case Intrinsic::spv_ptrcast:
619 case Intrinsic::spv_radians:
620 case Intrinsic::spv_reflect:
621 case Intrinsic::spv_refract:
622 case Intrinsic::spv_resource_getpointer:
623 case Intrinsic::spv_resource_handlefrombinding:
624 case Intrinsic::spv_resource_handlefromimplicitbinding:
625 case Intrinsic::spv_resource_nonuniformindex:
626 case Intrinsic::spv_resource_sample:
627 case Intrinsic::spv_rsqrt:
628 case Intrinsic::spv_saturate:
629 case Intrinsic::spv_sdot:
630 case Intrinsic::spv_sign:
631 case Intrinsic::spv_smoothstep:
632 case Intrinsic::spv_step:
633 case Intrinsic::spv_subgroup_id:
634 case Intrinsic::spv_subgroup_local_invocation_id:
635 case Intrinsic::spv_subgroup_max_size:
636 case Intrinsic::spv_subgroup_size:
637 case Intrinsic::spv_thread_id:
638 case Intrinsic::spv_thread_id_in_group:
639 case Intrinsic::spv_udot:
640 case Intrinsic::spv_undef:
641 case Intrinsic::spv_value_md:
642 case Intrinsic::spv_workgroup_size:
654 case SPIRV::OpTypeVoid:
655 case SPIRV::OpTypeBool:
656 case SPIRV::OpTypeInt:
657 case SPIRV::OpTypeFloat:
658 case SPIRV::OpTypeVector:
659 case SPIRV::OpTypeMatrix:
660 case SPIRV::OpTypeImage:
661 case SPIRV::OpTypeSampler:
662 case SPIRV::OpTypeSampledImage:
663 case SPIRV::OpTypeArray:
664 case SPIRV::OpTypeRuntimeArray:
665 case SPIRV::OpTypeStruct:
666 case SPIRV::OpTypeOpaque:
667 case SPIRV::OpTypePointer:
668 case SPIRV::OpTypeFunction:
669 case SPIRV::OpTypeEvent:
670 case SPIRV::OpTypeDeviceEvent:
671 case SPIRV::OpTypeReserveId:
672 case SPIRV::OpTypeQueue:
673 case SPIRV::OpTypePipe:
674 case SPIRV::OpTypeForwardPointer:
675 case SPIRV::OpTypePipeStorage:
676 case SPIRV::OpTypeNamedBarrier:
677 case SPIRV::OpTypeAccelerationStructureNV:
678 case SPIRV::OpTypeCooperativeMatrixNV:
679 case SPIRV::OpTypeCooperativeMatrixKHR:
689 if (
MI.getNumDefs() == 0)
692 for (
const auto &MO :
MI.all_defs()) {
694 if (
Reg.isPhysical()) {
699 if (
UseMI.getOpcode() != SPIRV::OpName) {
706 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
707 MI.isLifetimeMarker()) {
710 <<
"Not dead: Opcode is LOCAL_ESCAPE, fake use, or lifetime marker.\n");
721 if (
MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
722 MI.getOpcode() == TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS) {
725 LLVM_DEBUG(
dbgs() <<
"Dead: Intrinsic with no real side effects.\n");
730 if (
MI.mayStore() ||
MI.isCall() ||
731 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
732 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo()) {
733 LLVM_DEBUG(
dbgs() <<
"Not dead: instruction has side effects.\n");
744 LLVM_DEBUG(
dbgs() <<
"Dead: known opcode with no side effects\n");
751void SPIRVInstructionSelector::removeOpNamesForDeadMI(MachineInstr &
MI)
const {
753 for (
const auto &MO :
MI.all_defs()) {
757 SmallVector<MachineInstr *, 4> UselessOpNames;
760 "There is still a use of the dead function.");
763 for (MachineInstr *OpNameMI : UselessOpNames) {
765 OpNameMI->eraseFromParent();
770void SPIRVInstructionSelector::removeDeadInstruction(MachineInstr &
MI)
const {
773 removeOpNamesForDeadMI(
MI);
774 MI.eraseFromParent();
777bool SPIRVInstructionSelector::select(MachineInstr &
I) {
778 resetVRegsType(*
I.getParent()->getParent());
780 assert(
I.getParent() &&
"Instruction should be in a basic block!");
781 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
786 removeDeadInstruction(
I);
793 if (Opcode == SPIRV::ASSIGN_TYPE) {
794 Register DstReg =
I.getOperand(0).getReg();
795 Register SrcReg =
I.getOperand(1).getReg();
798 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
799 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
800 if (
Def->getOpcode() == TargetOpcode::G_SELECT) {
801 Register SelectDstReg =
Def->getOperand(0).getReg();
802 bool SuccessToSelectSelect [[maybe_unused]] = selectSelect(
804 assert(SuccessToSelectSelect);
806 Def->eraseFromParent();
813 bool Res = selectImpl(
I, *CoverageInfo);
815 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
816 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
820 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
832 }
else if (
I.getNumDefs() == 1) {
844 removeDeadInstruction(
I);
849 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
850 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
856 bool HasDefs =
I.getNumDefs() > 0;
859 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
860 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
861 if (spvSelect(ResVReg, ResType,
I)) {
863 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
874 case TargetOpcode::G_CONSTANT:
875 case TargetOpcode::G_FCONSTANT:
877 case TargetOpcode::G_SADDO:
878 case TargetOpcode::G_SSUBO:
885 MachineInstr &
I)
const {
888 if (DstRC != SrcRC && SrcRC)
890 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY))
897bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
898 SPIRVTypeInst ResType,
899 MachineInstr &
I)
const {
900 const unsigned Opcode =
I.getOpcode();
902 return selectImpl(
I, *CoverageInfo);
904 case TargetOpcode::G_CONSTANT:
905 case TargetOpcode::G_FCONSTANT:
906 return selectConst(ResVReg, ResType,
I);
907 case TargetOpcode::G_GLOBAL_VALUE:
908 return selectGlobalValue(ResVReg,
I);
909 case TargetOpcode::G_IMPLICIT_DEF:
910 return selectOpUndef(ResVReg, ResType,
I);
911 case TargetOpcode::G_FREEZE:
912 return selectFreeze(ResVReg, ResType,
I);
914 case TargetOpcode::G_INTRINSIC:
915 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
916 case TargetOpcode::G_INTRINSIC_CONVERGENT:
917 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
918 return selectIntrinsic(ResVReg, ResType,
I);
919 case TargetOpcode::G_BITREVERSE:
920 return selectBitreverse(ResVReg, ResType,
I);
922 case TargetOpcode::G_BUILD_VECTOR:
923 return selectBuildVector(ResVReg, ResType,
I);
924 case TargetOpcode::G_SPLAT_VECTOR:
925 return selectSplatVector(ResVReg, ResType,
I);
927 case TargetOpcode::G_SHUFFLE_VECTOR: {
928 MachineBasicBlock &BB = *
I.getParent();
929 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
932 .
addUse(
I.getOperand(1).getReg())
933 .
addUse(
I.getOperand(2).getReg());
934 for (
auto V :
I.getOperand(3).getShuffleMask())
939 case TargetOpcode::G_MEMMOVE:
940 case TargetOpcode::G_MEMCPY:
941 case TargetOpcode::G_MEMSET:
942 return selectMemOperation(ResVReg,
I);
944 case TargetOpcode::G_ICMP:
945 return selectICmp(ResVReg, ResType,
I);
946 case TargetOpcode::G_FCMP:
947 return selectFCmp(ResVReg, ResType,
I);
949 case TargetOpcode::G_FRAME_INDEX:
950 return selectFrameIndex(ResVReg, ResType,
I);
952 case TargetOpcode::G_LOAD:
953 return selectLoad(ResVReg, ResType,
I);
954 case TargetOpcode::G_STORE:
955 return selectStore(
I);
957 case TargetOpcode::G_BR:
958 return selectBranch(
I);
959 case TargetOpcode::G_BRCOND:
960 return selectBranchCond(
I);
962 case TargetOpcode::G_PHI:
963 return selectPhi(ResVReg,
I);
965 case TargetOpcode::G_FPTOSI:
966 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
967 case TargetOpcode::G_FPTOUI:
968 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
970 case TargetOpcode::G_FPTOSI_SAT:
971 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
972 case TargetOpcode::G_FPTOUI_SAT:
973 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
975 case TargetOpcode::G_SITOFP:
976 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
977 case TargetOpcode::G_UITOFP:
978 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
980 case TargetOpcode::G_CTPOP:
981 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
982 case TargetOpcode::G_SMIN:
983 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
984 case TargetOpcode::G_UMIN:
985 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
987 case TargetOpcode::G_SMAX:
988 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
989 case TargetOpcode::G_UMAX:
990 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
992 case TargetOpcode::G_SCMP:
993 return selectSUCmp(ResVReg, ResType,
I,
true);
994 case TargetOpcode::G_UCMP:
995 return selectSUCmp(ResVReg, ResType,
I,
false);
996 case TargetOpcode::G_LROUND:
997 case TargetOpcode::G_LLROUND: {
1000 MRI->
setRegClass(regForLround, &SPIRV::iIDRegClass);
1002 regForLround, *(
I.getParent()->getParent()));
1004 CL::round, GL::Round,
false);
1006 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConvertFToS))
1013 case TargetOpcode::G_STRICT_FMA:
1014 case TargetOpcode::G_FMA: {
1017 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpFmaKHR))
1020 .
addUse(
I.getOperand(1).getReg())
1021 .
addUse(
I.getOperand(2).getReg())
1022 .
addUse(
I.getOperand(3).getReg())
1027 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
1030 case TargetOpcode::G_STRICT_FLDEXP:
1031 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
1033 case TargetOpcode::G_FPOW:
1034 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
1035 case TargetOpcode::G_FPOWI:
1036 return selectExtInst(ResVReg, ResType,
I, CL::pown);
1038 case TargetOpcode::G_FEXP:
1039 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
1040 case TargetOpcode::G_FEXP2:
1041 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
1042 case TargetOpcode::G_FEXP10:
1043 return selectExp10(ResVReg, ResType,
I);
1045 case TargetOpcode::G_FMODF:
1046 return selectModf(ResVReg, ResType,
I);
1047 case TargetOpcode::G_FSINCOS:
1048 return selectSincos(ResVReg, ResType,
I);
1050 case TargetOpcode::G_FLOG:
1051 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
1052 case TargetOpcode::G_FLOG2:
1053 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
1054 case TargetOpcode::G_FLOG10:
1055 return selectLog10(ResVReg, ResType,
I);
1057 case TargetOpcode::G_FABS:
1058 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
1059 case TargetOpcode::G_ABS:
1060 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
1062 case TargetOpcode::G_FMINNUM:
1063 case TargetOpcode::G_FMINIMUM:
1064 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
1065 case TargetOpcode::G_FMAXNUM:
1066 case TargetOpcode::G_FMAXIMUM:
1067 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
1069 case TargetOpcode::G_FCOPYSIGN:
1070 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
1072 case TargetOpcode::G_FCEIL:
1073 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
1074 case TargetOpcode::G_FFLOOR:
1075 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
1077 case TargetOpcode::G_FCOS:
1078 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
1079 case TargetOpcode::G_FSIN:
1080 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
1081 case TargetOpcode::G_FTAN:
1082 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
1083 case TargetOpcode::G_FACOS:
1084 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
1085 case TargetOpcode::G_FASIN:
1086 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
1087 case TargetOpcode::G_FATAN:
1088 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
1089 case TargetOpcode::G_FATAN2:
1090 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
1091 case TargetOpcode::G_FCOSH:
1092 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
1093 case TargetOpcode::G_FSINH:
1094 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
1095 case TargetOpcode::G_FTANH:
1096 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
1098 case TargetOpcode::G_STRICT_FSQRT:
1099 case TargetOpcode::G_FSQRT:
1100 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
1102 case TargetOpcode::G_CTTZ:
1103 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
1104 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
1105 case TargetOpcode::G_CTLZ:
1106 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
1107 return selectExtInst(ResVReg, ResType,
I, CL::clz);
1109 case TargetOpcode::G_INTRINSIC_ROUND:
1110 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
1111 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1112 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
1113 case TargetOpcode::G_INTRINSIC_TRUNC:
1114 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
1115 case TargetOpcode::G_FRINT:
1116 case TargetOpcode::G_FNEARBYINT:
1117 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
1119 case TargetOpcode::G_SMULH:
1120 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
1121 case TargetOpcode::G_UMULH:
1122 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
1124 case TargetOpcode::G_SADDSAT:
1125 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
1126 case TargetOpcode::G_UADDSAT:
1127 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
1128 case TargetOpcode::G_SSUBSAT:
1129 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
1130 case TargetOpcode::G_USUBSAT:
1131 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
1133 case TargetOpcode::G_FFREXP:
1134 return selectFrexp(ResVReg, ResType,
I);
1136 case TargetOpcode::G_UADDO:
1137 return selectOverflowArith(ResVReg, ResType,
I,
1138 ResType->
getOpcode() == SPIRV::OpTypeVector
1139 ? SPIRV::OpIAddCarryV
1140 : SPIRV::OpIAddCarryS);
1141 case TargetOpcode::G_USUBO:
1142 return selectOverflowArith(ResVReg, ResType,
I,
1143 ResType->
getOpcode() == SPIRV::OpTypeVector
1144 ? SPIRV::OpISubBorrowV
1145 : SPIRV::OpISubBorrowS);
1146 case TargetOpcode::G_UMULO:
1147 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
1148 case TargetOpcode::G_SMULO:
1149 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
1151 case TargetOpcode::G_SEXT:
1152 return selectExt(ResVReg, ResType,
I,
true);
1153 case TargetOpcode::G_ANYEXT:
1154 case TargetOpcode::G_ZEXT:
1155 return selectExt(ResVReg, ResType,
I,
false);
1156 case TargetOpcode::G_TRUNC:
1157 return selectTrunc(ResVReg, ResType,
I);
1158 case TargetOpcode::G_FPTRUNC:
1159 case TargetOpcode::G_FPEXT:
1160 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
1162 case TargetOpcode::G_PTRTOINT:
1163 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
1164 case TargetOpcode::G_INTTOPTR:
1165 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
1166 case TargetOpcode::G_BITCAST:
1167 return selectBitcast(ResVReg, ResType,
I);
1168 case TargetOpcode::G_ADDRSPACE_CAST:
1169 return selectAddrSpaceCast(ResVReg, ResType,
I);
1170 case TargetOpcode::G_PTR_ADD: {
1172 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1176 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1177 (*II).getOpcode() == TargetOpcode::COPY ||
1178 (*II).getOpcode() == SPIRV::OpVariable) &&
1179 getImm(
I.getOperand(2), MRI));
1181 bool IsGVInit =
false;
1185 UseIt != UseEnd; UseIt = std::next(UseIt)) {
1186 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1187 (*UseIt).getOpcode() == SPIRV::OpSpecConstantOp ||
1188 (*UseIt).getOpcode() == SPIRV::OpVariable) {
1198 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
1211 "incompatible result and operand types in a bitcast");
1213 MachineInstrBuilder MIB =
1214 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
1221 : SPIRV::OpInBoundsPtrAccessChain))
1225 .
addUse(
I.getOperand(2).getReg())
1228 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
1232 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
1234 .
addUse(
I.getOperand(2).getReg())
1243 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
1246 .
addImm(
static_cast<uint32_t
>(
1247 SPIRV::Opcode::InBoundsPtrAccessChain))
1250 .
addUse(
I.getOperand(2).getReg());
1255 case TargetOpcode::G_ATOMICRMW_OR:
1256 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
1257 case TargetOpcode::G_ATOMICRMW_ADD:
1258 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
1259 case TargetOpcode::G_ATOMICRMW_AND:
1260 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
1261 case TargetOpcode::G_ATOMICRMW_MAX:
1262 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
1263 case TargetOpcode::G_ATOMICRMW_MIN:
1264 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
1265 case TargetOpcode::G_ATOMICRMW_SUB:
1266 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
1267 case TargetOpcode::G_ATOMICRMW_XOR:
1268 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
1269 case TargetOpcode::G_ATOMICRMW_UMAX:
1270 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
1271 case TargetOpcode::G_ATOMICRMW_UMIN:
1272 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
1273 case TargetOpcode::G_ATOMICRMW_XCHG:
1274 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
1275 case TargetOpcode::G_ATOMIC_CMPXCHG:
1276 return selectAtomicCmpXchg(ResVReg, ResType,
I);
1278 case TargetOpcode::G_ATOMICRMW_FADD:
1279 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
1280 case TargetOpcode::G_ATOMICRMW_FSUB:
1282 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
1283 ResType->
getOpcode() == SPIRV::OpTypeVector
1285 : SPIRV::OpFNegate);
1286 case TargetOpcode::G_ATOMICRMW_FMIN:
1287 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
1288 case TargetOpcode::G_ATOMICRMW_FMAX:
1289 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
1291 case TargetOpcode::G_FENCE:
1292 return selectFence(
I);
1294 case TargetOpcode::G_STACKSAVE:
1295 return selectStackSave(ResVReg, ResType,
I);
1296 case TargetOpcode::G_STACKRESTORE:
1297 return selectStackRestore(
I);
1299 case TargetOpcode::G_UNMERGE_VALUES:
1305 case TargetOpcode::G_TRAP:
1306 case TargetOpcode::G_UBSANTRAP:
1307 case TargetOpcode::DBG_LABEL:
1309 case TargetOpcode::G_DEBUGTRAP:
1310 return selectDebugTrap(ResVReg, ResType,
I);
1317bool SPIRVInstructionSelector::selectDebugTrap(
Register ResVReg,
1318 SPIRVTypeInst ResType,
1319 MachineInstr &
I)
const {
1320 unsigned Opcode = SPIRV::OpNop;
1327bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1328 SPIRVTypeInst ResType,
1330 GL::GLSLExtInst GLInst,
1331 bool setMIFlags,
bool useMISrc,
1334 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
1335 std::string DiagMsg;
1336 raw_string_ostream OS(DiagMsg);
1337 I.print(OS,
true,
false,
false,
false);
1338 DiagMsg +=
" is only supported with the GLSL extended instruction set.\n";
1341 return selectExtInst(ResVReg, ResType,
I,
1342 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}},
1343 setMIFlags, useMISrc, SrcRegs);
1346bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1347 SPIRVTypeInst ResType,
1349 CL::OpenCLExtInst CLInst,
1350 bool setMIFlags,
bool useMISrc,
1352 return selectExtInst(ResVReg, ResType,
I,
1353 {{SPIRV::InstructionSet::OpenCL_std, CLInst}},
1354 setMIFlags, useMISrc, SrcRegs);
1357bool SPIRVInstructionSelector::selectExtInst(
1358 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
1359 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst,
bool setMIFlags,
1361 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1362 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1363 return selectExtInst(ResVReg, ResType,
I, ExtInsts, setMIFlags, useMISrc,
1367bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1368 SPIRVTypeInst ResType,
1371 bool setMIFlags,
bool useMISrc,
1374 for (
const auto &[InstructionSet, Opcode] : Insts) {
1378 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1381 .
addImm(
static_cast<uint32_t
>(InstructionSet))
1386 const unsigned NumOps =
I.getNumOperands();
1389 I.getOperand(Index).getType() ==
1390 MachineOperand::MachineOperandType::MO_IntrinsicID)
1393 MIB.
add(
I.getOperand(Index));
1405bool SPIRVInstructionSelector::selectFrexp(
Register ResVReg,
1406 SPIRVTypeInst ResType,
1407 MachineInstr &
I)
const {
1408 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1409 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1410 for (
const auto &Ex : ExtInsts) {
1411 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1412 uint32_t Opcode = Ex.second;
1416 MachineIRBuilder MIRBuilder(
I);
1419 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1424 BuildMI(*It->getParent(), It, It->getDebugLoc(),
TII.get(SPIRV::OpVariable))
1427 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1430 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1433 .
addImm(
static_cast<uint32_t
>(Ex.first))
1435 .
add(
I.getOperand(2))
1439 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1440 .
addDef(
I.getOperand(1).getReg())
1449bool SPIRVInstructionSelector::selectSincos(
Register ResVReg,
1450 SPIRVTypeInst ResType,
1451 MachineInstr &
I)
const {
1452 Register CosResVReg =
I.getOperand(1).getReg();
1453 unsigned SrcIdx =
I.getNumExplicitDefs();
1458 MachineIRBuilder MIRBuilder(
I);
1460 ResType, MIRBuilder, SPIRV::StorageClass::Function);
1465 BuildMI(*It->getParent(), It, It->getDebugLoc(),
TII.get(SPIRV::OpVariable))
1468 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1470 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1473 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
1475 .
add(
I.getOperand(SrcIdx))
1478 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1486 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1489 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
1491 .
add(
I.getOperand(SrcIdx))
1493 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1496 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
1498 .
add(
I.getOperand(SrcIdx))
1505bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
1506 SPIRVTypeInst ResType,
1508 std::vector<Register> Srcs,
1509 unsigned Opcode)
const {
1510 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1520bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
1521 SPIRVTypeInst ResType,
1523 unsigned Opcode)
const {
1525 Register SrcReg =
I.getOperand(1).getReg();
1530 unsigned DefOpCode = DefIt->getOpcode();
1531 if (DefOpCode == SPIRV::ASSIGN_TYPE || DefOpCode == TargetOpcode::COPY) {
1534 if (
auto *VRD =
getVRegDef(*MRI, DefIt->getOperand(1).getReg()))
1535 DefOpCode = VRD->getOpcode();
1537 if (DefOpCode == TargetOpcode::G_GLOBAL_VALUE ||
1538 DefOpCode == TargetOpcode::G_CONSTANT ||
1539 DefOpCode == SPIRV::OpVariable || DefOpCode == SPIRV::OpConstantI) {
1545 uint32_t SpecOpcode = 0;
1547 case SPIRV::OpConvertPtrToU:
1548 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertPtrToU);
1550 case SPIRV::OpConvertUToPtr:
1551 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertUToPtr);
1556 TII.get(SPIRV::OpSpecConstantOp))
1566 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).getReg()},
1570bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1571 SPIRVTypeInst ResType,
1572 MachineInstr &
I)
const {
1573 Register OpReg =
I.getOperand(1).getReg();
1574 SPIRVTypeInst OpType =
1578 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1588 if (
MemOp->isVolatile())
1589 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1590 if (
MemOp->isNonTemporal())
1591 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1593 if (!ST->isShader() &&
MemOp->getAlign().value())
1594 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1598 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1599 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1603 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1605 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1609 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1613 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1615 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1627 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1629 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1631 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1635bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1636 SPIRVTypeInst ResType,
1637 MachineInstr &
I)
const {
1639 Register Ptr =
I.getOperand(1 + OpOffset).getReg();
1644 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1645 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1647 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1651 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1655 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1656 return generateImageReadOrFetch(ResVReg, ResType, NewHandleReg, IdxReg,
1657 I.getDebugLoc(),
I);
1661 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1665 if (!
I.getNumMemOperands()) {
1666 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1668 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1671 MachineIRBuilder MIRBuilder(
I);
1678bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1680 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1681 Register Ptr =
I.getOperand(1 + OpOffset).getReg();
1686 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1687 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1692 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1696 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1697 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1698 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1699 TII.get(SPIRV::OpImageWrite))
1705 if (sampledTypeIsSignedInteger(LLVMHandleType))
1708 BMI.constrainAllUses(
TII,
TRI, RBI);
1714 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1717 if (!
I.getNumMemOperands()) {
1718 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1720 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1723 MachineIRBuilder MIRBuilder(
I);
1730bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1731 SPIRVTypeInst ResType,
1732 MachineInstr &
I)
const {
1733 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1735 "llvm.stacksave intrinsic: this instruction requires the following "
1736 "SPIR-V extension: SPV_INTEL_variable_length_array",
1739 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
1746bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
I)
const {
1747 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1749 "llvm.stackrestore intrinsic: this instruction requires the following "
1750 "SPIR-V extension: SPV_INTEL_variable_length_array",
1752 if (!
I.getOperand(0).isReg())
1755 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1756 .
addUse(
I.getOperand(0).getReg())
1762SPIRVInstructionSelector::getOrCreateMemSetGlobal(MachineInstr &
I)
const {
1763 MachineIRBuilder MIRBuilder(
I);
1764 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1771 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1775 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1776 Type *ArrTy = ArrayType::get(ValTy, Num);
1778 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1781 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1788 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1791 .
addImm(SPIRV::StorageClass::UniformConstant)
1802bool SPIRVInstructionSelector::selectCopyMemory(MachineInstr &
I,
1805 Register DstReg =
I.getOperand(0).getReg();
1810 uint64_t CopySize =
getIConstVal(
I.getOperand(2).getReg(), MRI);
1815 "Unable to determine pointee type size for OpCopyMemory");
1816 const DataLayout &
DL =
I.getMF()->getFunction().getDataLayout();
1817 if (CopySize !=
DL.getTypeStoreSize(
const_cast<Type *
>(LLVMPointeeTy)))
1819 "OpCopyMemory requires the size to match the pointee type size");
1820 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemory))
1823 if (
I.getNumMemOperands()) {
1824 MachineIRBuilder MIRBuilder(
I);
1831bool SPIRVInstructionSelector::selectCopyMemorySized(MachineInstr &
I,
1834 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1835 .
addUse(
I.getOperand(0).getReg())
1837 .
addUse(
I.getOperand(2).getReg());
1838 if (
I.getNumMemOperands()) {
1839 MachineIRBuilder MIRBuilder(
I);
1846bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1847 MachineInstr &
I)
const {
1848 Register SrcReg =
I.getOperand(1).getReg();
1849 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1850 Register VarReg = getOrCreateMemSetGlobal(
I);
1853 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1855 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1857 if (!selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast))
1861 if (!selectCopyMemory(
I, SrcReg))
1864 if (!selectCopyMemorySized(
I, SrcReg))
1867 if (ResVReg.
isValid() && ResVReg !=
I.getOperand(0).getReg())
1868 if (!BuildCOPY(ResVReg,
I.getOperand(0).getReg(),
I))
1873bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1874 SPIRVTypeInst ResType,
1877 unsigned NegateOpcode)
const {
1879 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1882 Register ScopeReg = buildI32Constant(Scope,
I);
1884 Register Ptr =
I.getOperand(1).getReg();
1890 Register MemSemReg = buildI32Constant(MemSem ,
I);
1892 Register ValueReg =
I.getOperand(2).getReg();
1893 if (NegateOpcode != 0) {
1896 if (!selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode))
1901 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1912bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
I)
const {
1913 unsigned ArgI =
I.getNumOperands() - 1;
1915 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1916 SPIRVTypeInst SrcType =
1918 if (!SrcType || SrcType->
getOpcode() != SPIRV::OpTypeVector)
1920 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1922 SPIRVTypeInst ScalarType =
1925 unsigned CurrentIndex = 0;
1926 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1927 Register ResVReg =
I.getOperand(i).getReg();
1930 LLT ResLLT = MRI->
getType(ResVReg);
1936 ResType = ScalarType;
1942 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
1945 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
1951 for (
unsigned j = 0;
j < NumElements; ++
j) {
1952 MIB.
addImm(CurrentIndex + j);
1954 CurrentIndex += NumElements;
1958 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1970bool SPIRVInstructionSelector::selectFence(MachineInstr &
I)
const {
1973 Register MemSemReg = buildI32Constant(MemSem,
I);
1975 uint32_t
Scope =
static_cast<uint32_t
>(
1977 Register ScopeReg = buildI32Constant(Scope,
I);
1979 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1986bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1987 SPIRVTypeInst ResType,
1989 unsigned Opcode)
const {
1990 Type *ResTy =
nullptr;
1994 "Not enough info to select the arithmetic with overflow instruction");
1997 "with overflow instruction");
2003 MachineIRBuilder MIRBuilder(
I);
2005 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
2006 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
2012 Register ZeroReg = buildZerosVal(ResType,
I);
2017 if (ResName.
size() > 0)
2022 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
2025 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
2026 MIB.
addUse(
I.getOperand(i).getReg());
2031 MRI->
setRegClass(HigherVReg, &SPIRV::iIDRegClass);
2032 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
2034 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2035 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
2042 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpINotEqual))
2043 .
addDef(
I.getOperand(1).getReg())
2051bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
2052 SPIRVTypeInst ResType,
2053 MachineInstr &
I)
const {
2057 Register Ptr =
I.getOperand(2).getReg();
2060 const MachineMemOperand *MemOp = *
I.memoperands_begin();
2063 ScopeReg = buildI32Constant(Scope,
I);
2065 unsigned ScSem =
static_cast<uint32_t
>(
2068 unsigned MemSemEq =
static_cast<uint32_t
>(
getMemSemantics(AO)) | ScSem;
2069 Register MemSemEqReg = buildI32Constant(MemSemEq,
I);
2071 unsigned MemSemNeq =
static_cast<uint32_t
>(
getMemSemantics(FO)) | ScSem;
2072 if (MemSemEq == MemSemNeq)
2073 MemSemNeqReg = MemSemEqReg;
2075 MemSemNeqReg = buildI32Constant(MemSemEq,
I);
2078 ScopeReg =
I.getOperand(5).getReg();
2079 MemSemEqReg =
I.getOperand(6).getReg();
2080 MemSemNeqReg =
I.getOperand(7).getReg();
2084 Register Val =
I.getOperand(4).getReg();
2088 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
2107 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
2114 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
2126 case SPIRV::StorageClass::DeviceOnlyINTEL:
2127 case SPIRV::StorageClass::HostOnlyINTEL:
2136 bool IsGRef =
false;
2137 bool IsAllowedRefs =
2139 unsigned Opcode = It.getOpcode();
2140 if (Opcode == SPIRV::OpConstantComposite ||
2141 Opcode == SPIRV::OpVariable ||
2142 isSpvIntrinsic(It, Intrinsic::spv_init_global))
2143 return IsGRef = true;
2144 return Opcode == SPIRV::OpName;
2146 return IsAllowedRefs && IsGRef;
2149Register SPIRVInstructionSelector::getUcharPtrTypeReg(
2150 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
2152 Type::getInt8Ty(
I.getMF()->getFunction().getContext()),
I, SC));
2156SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
I,
Register Dest,
2158 uint32_t Opcode)
const {
2159 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2160 TII.get(SPIRV::OpSpecConstantOp))
2168SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
2169 SPIRVTypeInst SrcPtrTy)
const {
2170 SPIRVTypeInst GenericPtrTy =
2174 SPIRV::StorageClass::Generic),
2176 MachineFunction *MF =
I.getParent()->getParent();
2178 MachineInstrBuilder MIB = buildSpecConstantOp(
2180 static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric));
2190bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
2191 SPIRVTypeInst ResType,
2192 MachineInstr &
I)
const {
2196 Register SrcPtr =
I.getOperand(1).getReg();
2200 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
2201 ResType->
getOpcode() != SPIRV::OpTypePointer)
2202 return BuildCOPY(ResVReg, SrcPtr,
I);
2212 unsigned SpecOpcode =
2214 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
2217 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
2224 buildSpecConstantOp(
I, ResVReg, SrcPtr, getUcharPtrTypeReg(
I, DstSC),
2226 .constrainAllUses(
TII,
TRI, RBI);
2228 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
2230 buildSpecConstantOp(
2232 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
2233 .constrainAllUses(
TII,
TRI, RBI);
2240 return BuildCOPY(ResVReg, SrcPtr,
I);
2242 if ((SrcSC == SPIRV::StorageClass::Function &&
2243 DstSC == SPIRV::StorageClass::Private) ||
2244 (DstSC == SPIRV::StorageClass::Function &&
2245 SrcSC == SPIRV::StorageClass::Private))
2246 return BuildCOPY(ResVReg, SrcPtr,
I);
2250 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
2253 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
2256 SPIRVTypeInst GenericPtrTy =
2275 return selectUnOp(ResVReg, ResType,
I,
2276 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
2278 return selectUnOp(ResVReg, ResType,
I,
2279 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
2281 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
2283 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
2293 return SPIRV::OpFOrdEqual;
2295 return SPIRV::OpFOrdGreaterThanEqual;
2297 return SPIRV::OpFOrdGreaterThan;
2299 return SPIRV::OpFOrdLessThanEqual;
2301 return SPIRV::OpFOrdLessThan;
2303 return SPIRV::OpFOrdNotEqual;
2305 return SPIRV::OpOrdered;
2307 return SPIRV::OpFUnordEqual;
2309 return SPIRV::OpFUnordGreaterThanEqual;
2311 return SPIRV::OpFUnordGreaterThan;
2313 return SPIRV::OpFUnordLessThanEqual;
2315 return SPIRV::OpFUnordLessThan;
2317 return SPIRV::OpFUnordNotEqual;
2319 return SPIRV::OpUnordered;
2329 return SPIRV::OpIEqual;
2331 return SPIRV::OpINotEqual;
2333 return SPIRV::OpSGreaterThanEqual;
2335 return SPIRV::OpSGreaterThan;
2337 return SPIRV::OpSLessThanEqual;
2339 return SPIRV::OpSLessThan;
2341 return SPIRV::OpUGreaterThanEqual;
2343 return SPIRV::OpUGreaterThan;
2345 return SPIRV::OpULessThanEqual;
2347 return SPIRV::OpULessThan;
2356 return SPIRV::OpPtrEqual;
2358 return SPIRV::OpPtrNotEqual;
2369 return SPIRV::OpLogicalEqual;
2371 return SPIRV::OpLogicalNotEqual;
2405bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
2406 SPIRVTypeInst ResType,
2408 unsigned OpAnyOrAll)
const {
2409 assert(
I.getNumOperands() == 3);
2410 assert(
I.getOperand(2).isReg());
2412 Register InputRegister =
I.getOperand(2).getReg();
2419 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
2420 if (IsBoolTy && !IsVectorTy) {
2421 assert(ResVReg ==
I.getOperand(0).getReg());
2422 return BuildCOPY(ResVReg, InputRegister,
I);
2426 unsigned SpirvNotEqualId =
2427 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2429 SPIRVTypeInst SpvBoolTy = SpvBoolScalarTy;
2434 IsBoolTy ? InputRegister
2442 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
2444 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SpirvNotEqualId))
2461bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
2462 SPIRVTypeInst ResType,
2463 MachineInstr &
I)
const {
2464 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
2467bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
2468 SPIRVTypeInst ResType,
2469 MachineInstr &
I)
const {
2470 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
2474bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
2475 SPIRVTypeInst ResType,
2476 MachineInstr &
I)
const {
2477 assert(
I.getNumOperands() == 4);
2478 assert(
I.getOperand(2).isReg());
2479 assert(
I.getOperand(3).isReg());
2481 [[maybe_unused]] SPIRVTypeInst VecType =
2486 "dot product requires a vector of at least 2 components");
2488 [[maybe_unused]] SPIRVTypeInst EltType =
2497 .
addUse(
I.getOperand(2).getReg())
2498 .
addUse(
I.getOperand(3).getReg())
2503bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
2504 SPIRVTypeInst ResType,
2507 assert(
I.getNumOperands() == 4);
2508 assert(
I.getOperand(2).isReg());
2509 assert(
I.getOperand(3).isReg());
2512 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2516 .
addUse(
I.getOperand(2).getReg())
2517 .
addUse(
I.getOperand(3).getReg())
2524bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2525 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
2526 assert(
I.getNumOperands() == 4);
2527 assert(
I.getOperand(2).isReg());
2528 assert(
I.getOperand(3).isReg());
2532 Register Vec0 =
I.getOperand(2).getReg();
2533 Register Vec1 =
I.getOperand(3).getReg();
2537 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIMulV))
2546 "dot product requires a vector of at least 2 components");
2549 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2559 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2570 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
2582bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2583 SPIRVTypeInst ResType,
2584 MachineInstr &
I)
const {
2586 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
2589 .
addUse(
I.getOperand(2).getReg())
2594bool SPIRVInstructionSelector::selectOpIsNan(
Register ResVReg,
2595 SPIRVTypeInst ResType,
2596 MachineInstr &
I)
const {
2598 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsNan))
2601 .
addUse(
I.getOperand(2).getReg())
2606template <
bool Signed>
2607bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2608 SPIRVTypeInst ResType,
2609 MachineInstr &
I)
const {
2610 assert(
I.getNumOperands() == 5);
2611 assert(
I.getOperand(2).isReg());
2612 assert(
I.getOperand(3).isReg());
2613 assert(
I.getOperand(4).isReg());
2616 Register Acc =
I.getOperand(2).getReg();
2620 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2622 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(DotOp))
2627 MIB.
addImm(SPIRV::BuiltIn::PackedVectorFormat4x8Bit);
2630 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
2642template <
bool Signed>
2643bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2644 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
2645 assert(
I.getNumOperands() == 5);
2646 assert(
I.getOperand(2).isReg());
2647 assert(
I.getOperand(3).isReg());
2648 assert(
I.getOperand(4).isReg());
2651 Register Acc =
I.getOperand(2).getReg();
2657 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2661 for (
unsigned i = 0; i < 4; i++) {
2684 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIMulS))
2704 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
2719bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2720 SPIRVTypeInst ResType,
2721 MachineInstr &
I)
const {
2722 assert(
I.getNumOperands() == 3);
2723 assert(
I.getOperand(2).isReg());
2725 Register VZero = buildZerosValF(ResType,
I);
2726 Register VOne = buildOnesValF(ResType,
I);
2728 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2731 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2733 .
addUse(
I.getOperand(2).getReg())
2740bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2741 SPIRVTypeInst ResType,
2742 MachineInstr &
I)
const {
2743 assert(
I.getNumOperands() == 3);
2744 assert(
I.getOperand(2).isReg());
2746 Register InputRegister =
I.getOperand(2).getReg();
2748 auto &
DL =
I.getDebugLoc();
2758 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2760 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2768 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2773 if (NeedsConversion) {
2774 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2785bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2786 SPIRVTypeInst ResType,
2788 unsigned Opcode)
const {
2792 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2798 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2799 BMI.addUse(
I.getOperand(J).getReg());
2806bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2807 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
2812 if (!selectWaveOpInst(BallotReg, BallotType,
I,
2813 SPIRV::OpGroupNonUniformBallot))
2818 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2823 .
addImm(SPIRV::GroupOperation::Reduce)
2832 if (
Type->getOpcode() != SPIRV::OpTypeVector)
2836 return Type->getOperand(2).getImm();
2839bool SPIRVInstructionSelector::selectWaveActiveAllEqual(
Register ResVReg,
2840 SPIRVTypeInst ResType,
2841 MachineInstr &
I)
const {
2846 Register InputReg =
I.getOperand(2).getReg();
2851 bool IsVector = NumElems > 1;
2854 SPIRVTypeInst ElemInputType = InputType;
2855 SPIRVTypeInst ElemBoolType = ResType;
2868 return selectWaveOpInst(ResVReg, ElemBoolType,
I,
2869 SPIRV::OpGroupNonUniformAllEqual);
2874 ElementResults.
reserve(NumElems);
2876 for (
unsigned Idx = 0; Idx < NumElems; ++Idx) {
2889 ElemInput = Extracted;
2895 BuildMI(BB,
I,
DL,
TII.get(SPIRV::OpGroupNonUniformAllEqual))
2906 auto MIB =
BuildMI(BB,
I,
DL,
TII.get(SPIRV::OpCompositeConstruct))
2917bool SPIRVInstructionSelector::selectWavePrefixBitCount(
Register ResVReg,
2918 SPIRVTypeInst ResType,
2919 MachineInstr &
I)
const {
2921 assert(
I.getNumOperands() == 3);
2923 auto Op =
I.getOperand(2);
2935 if (InputType->
getOpcode() != SPIRV::OpTypeBool)
2957 BuildMI(BB,
I,
DL,
TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2961 .
addImm(SPIRV::GroupOperation::ExclusiveScan)
2968bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2969 SPIRVTypeInst ResType,
2971 bool IsUnsigned)
const {
2972 return selectWaveReduce(
2973 ResVReg, ResType,
I, IsUnsigned,
2974 [&](
Register InputRegister,
bool IsUnsigned) {
2975 const bool IsFloatTy =
2977 const auto IntOp = IsUnsigned ? SPIRV::OpGroupNonUniformUMax
2978 : SPIRV::OpGroupNonUniformSMax;
2979 return IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntOp;
2983bool SPIRVInstructionSelector::selectWaveReduceMin(
Register ResVReg,
2984 SPIRVTypeInst ResType,
2986 bool IsUnsigned)
const {
2987 return selectWaveReduce(
2988 ResVReg, ResType,
I, IsUnsigned,
2989 [&](
Register InputRegister,
bool IsUnsigned) {
2990 const bool IsFloatTy =
2992 const auto IntOp = IsUnsigned ? SPIRV::OpGroupNonUniformUMin
2993 : SPIRV::OpGroupNonUniformSMin;
2994 return IsFloatTy ? SPIRV::OpGroupNonUniformFMin : IntOp;
2998bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2999 SPIRVTypeInst ResType,
3000 MachineInstr &
I)
const {
3001 return selectWaveReduce(ResVReg, ResType,
I,
false,
3002 [&](
Register InputRegister,
bool IsUnsigned) {
3004 InputRegister, SPIRV::OpTypeFloat);
3005 return IsFloatTy ? SPIRV::OpGroupNonUniformFAdd
3006 : SPIRV::OpGroupNonUniformIAdd;
3010bool SPIRVInstructionSelector::selectWaveReduceProduct(
Register ResVReg,
3011 SPIRVTypeInst ResType,
3012 MachineInstr &
I)
const {
3013 return selectWaveReduce(ResVReg, ResType,
I,
false,
3014 [&](
Register InputRegister,
bool IsUnsigned) {
3016 InputRegister, SPIRV::OpTypeFloat);
3017 return IsFloatTy ? SPIRV::OpGroupNonUniformFMul
3018 : SPIRV::OpGroupNonUniformIMul;
3022template <
typename PickOpcodeFn>
3023bool SPIRVInstructionSelector::selectWaveReduce(
3024 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
bool IsUnsigned,
3025 PickOpcodeFn &&PickOpcode)
const {
3026 assert(
I.getNumOperands() == 3);
3027 assert(
I.getOperand(2).isReg());
3029 Register InputRegister =
I.getOperand(2).getReg();
3036 const unsigned Opcode = PickOpcode(InputRegister, IsUnsigned);
3042 .
addImm(SPIRV::GroupOperation::Reduce)
3043 .
addUse(
I.getOperand(2).getReg())
3048bool SPIRVInstructionSelector::selectWaveExclusiveScanSum(
3049 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
3050 return selectWaveExclusiveScan(ResVReg, ResType,
I,
false,
3051 [&](
Register InputRegister,
bool IsUnsigned) {
3053 InputRegister, SPIRV::OpTypeFloat);
3055 ? SPIRV::OpGroupNonUniformFAdd
3056 : SPIRV::OpGroupNonUniformIAdd;
3060bool SPIRVInstructionSelector::selectWaveExclusiveScanProduct(
3061 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
3062 return selectWaveExclusiveScan(ResVReg, ResType,
I,
false,
3063 [&](
Register InputRegister,
bool IsUnsigned) {
3065 InputRegister, SPIRV::OpTypeFloat);
3067 ? SPIRV::OpGroupNonUniformFMul
3068 : SPIRV::OpGroupNonUniformIMul;
3072template <
typename PickOpcodeFn>
3073bool SPIRVInstructionSelector::selectWaveExclusiveScan(
3074 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
bool IsUnsigned,
3075 PickOpcodeFn &&PickOpcode)
const {
3076 assert(
I.getNumOperands() == 3);
3077 assert(
I.getOperand(2).isReg());
3079 Register InputRegister =
I.getOperand(2).getReg();
3086 const unsigned Opcode = PickOpcode(InputRegister, IsUnsigned);
3092 .
addImm(SPIRV::GroupOperation::ExclusiveScan)
3093 .
addUse(
I.getOperand(2).getReg())
3098bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
3099 SPIRVTypeInst ResType,
3100 MachineInstr &
I)
const {
3102 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
3105 .
addUse(
I.getOperand(1).getReg())
3110bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
3111 SPIRVTypeInst ResType,
3112 MachineInstr &
I)
const {
3118 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
3120 Register OpReg =
I.getOperand(1).getReg();
3121 if (MachineInstr *Def = MRI->
getVRegDef(OpReg)) {
3122 if (
Def->getOpcode() == TargetOpcode::COPY)
3125 switch (
Def->getOpcode()) {
3126 case SPIRV::ASSIGN_TYPE:
3127 if (MachineInstr *AssignToDef =
3129 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
3130 Reg =
Def->getOperand(2).getReg();
3133 case SPIRV::OpUndef:
3134 Reg =
Def->getOperand(1).getReg();
3137 unsigned DestOpCode;
3139 DestOpCode = SPIRV::OpConstantNull;
3141 DestOpCode = TargetOpcode::COPY;
3144 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
3145 .
addDef(
I.getOperand(0).getReg())
3153bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
3154 SPIRVTypeInst ResType,
3155 MachineInstr &
I)
const {
3157 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3159 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
3163 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
3168 for (
unsigned i =
I.getNumExplicitDefs();
3169 i <
I.getNumExplicitOperands() && IsConst; ++i)
3173 if (!IsConst &&
N < 2)
3175 "There must be at least two constituent operands in a vector");
3178 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3179 TII.get(IsConst ? SPIRV::OpConstantComposite
3180 : SPIRV::OpCompositeConstruct))
3183 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
3184 MIB.
addUse(
I.getOperand(i).getReg());
3189bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
3190 SPIRVTypeInst ResType,
3191 MachineInstr &
I)
const {
3193 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3195 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
3201 if (!
I.getOperand(
OpIdx).isReg())
3208 if (!IsConst &&
N < 2)
3210 "There must be at least two constituent operands in a vector");
3213 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3214 TII.get(IsConst ? SPIRV::OpConstantComposite
3215 : SPIRV::OpCompositeConstruct))
3218 for (
unsigned i = 0; i <
N; ++i)
3224bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
3225 SPIRVTypeInst ResType,
3226 MachineInstr &
I)
const {
3231 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
3233 Opcode = SPIRV::OpDemoteToHelperInvocation;
3235 Opcode = SPIRV::OpKill;
3237 if (MachineInstr *NextI =
I.getNextNode()) {
3239 NextI->eraseFromParent();
3249bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
3250 SPIRVTypeInst ResType,
unsigned CmpOpc,
3251 MachineInstr &
I)
const {
3252 Register Cmp0 =
I.getOperand(2).getReg();
3253 Register Cmp1 =
I.getOperand(3).getReg();
3256 "CMP operands should have the same type");
3257 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
3267bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
3268 SPIRVTypeInst ResType,
3269 MachineInstr &
I)
const {
3270 auto Pred =
I.getOperand(1).getPredicate();
3273 Register CmpOperand =
I.getOperand(2).getReg();
3280 return selectCmp(ResVReg, ResType, CmpOpc,
I);
3284SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
3285 SPIRVTypeInst ResType)
const {
3287 SPIRVTypeInst SpvI32Ty =
3290 auto ConstInt = ConstantInt::get(LLVMTy, Val);
3297 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3300 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
3303 .
addImm(APInt(32, Val).getZExtValue());
3305 GR.
add(ConstInt,
MI);
3310bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
3311 SPIRVTypeInst ResType,
3312 MachineInstr &
I)
const {
3314 return selectCmp(ResVReg, ResType, CmpOp,
I);
3317bool SPIRVInstructionSelector::selectExp10(
Register ResVReg,
3318 SPIRVTypeInst ResType,
3319 MachineInstr &
I)
const {
3321 return selectExtInst(ResVReg, ResType,
I, CL::exp10);
3328 if (ResType->
getOpcode() != SPIRV::OpTypeVector &&
3329 ResType->
getOpcode() != SPIRV::OpTypeFloat)
3332 MachineIRBuilder MIRBuilder(
I);
3334 SPIRVTypeInst SpirvScalarType = ResType->
getOpcode() == SPIRV::OpTypeVector
3340 "only float operands supported by GLSL extended math");
3343 MIRBuilder, SpirvScalarType);
3345 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
3346 ? SPIRV::OpVectorTimesScalar
3349 if (!selectOpWithSrcs(ArgReg, ResType,
I,
3350 {
I.getOperand(1).getReg(), ConstReg}, Opcode))
3352 if (!selectExtInst(ResVReg, ResType,
I,
3353 {{SPIRV::InstructionSet::GLSL_std_450, GL::Exp2}},
false,
3363Register SPIRVInstructionSelector::buildZerosVal(SPIRVTypeInst ResType,
3364 MachineInstr &
I)
const {
3367 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3372bool SPIRVInstructionSelector::isScalarOrVectorIntConstantZero(
3378 if (!CompType || CompType->
getOpcode() != SPIRV::OpTypeInt)
3386 if (
Def->getOpcode() == SPIRV::OpConstantNull)
3389 if (
Def->getOpcode() == TargetOpcode::G_CONSTANT ||
3390 Def->getOpcode() == SPIRV::OpConstantI)
3403 if (
Def->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
3404 (
Def->getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
3406 Intrinsic::spv_const_composite)) {
3407 unsigned StartOp =
Def->getOpcode() == TargetOpcode::G_BUILD_VECTOR ? 1 : 2;
3408 for (
unsigned i = StartOp; i <
Def->getNumOperands(); ++i) {
3409 if (!IsZero(
Def->getOperand(i).getReg()))
3418Register SPIRVInstructionSelector::buildZerosValF(SPIRVTypeInst ResType,
3419 MachineInstr &
I)
const {
3423 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3428Register SPIRVInstructionSelector::buildOnesValF(SPIRVTypeInst ResType,
3429 MachineInstr &
I)
const {
3433 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3439 SPIRVTypeInst ResType,
3440 MachineInstr &
I)
const {
3444 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3449bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
3450 SPIRVTypeInst ResType,
3451 MachineInstr &
I)
const {
3452 Register SelectFirstArg =
I.getOperand(2).getReg();
3453 Register SelectSecondArg =
I.getOperand(3).getReg();
3462 SPIRV::OpTypeVector;
3469 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
3470 }
else if (IsPtrTy) {
3471 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
3473 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
3477 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
3478 }
else if (IsPtrTy) {
3479 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
3481 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
3484 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3487 .
addUse(
I.getOperand(1).getReg())
3496bool SPIRVInstructionSelector::selectBoolToInt(
Register ResVReg,
3497 SPIRVTypeInst ResType,
3499 MachineInstr &InsertAt,
3500 bool IsSigned)
const {
3502 Register ZeroReg = buildZerosVal(ResType, InsertAt);
3503 Register OneReg = buildOnesVal(IsSigned, ResType, InsertAt);
3504 bool IsScalarBool = GR.
isScalarOfType(BooleanVReg, SPIRV::OpTypeBool);
3506 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
3518bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
3519 SPIRVTypeInst ResType,
3520 MachineInstr &
I,
bool IsSigned,
3521 unsigned Opcode)
const {
3522 Register SrcReg =
I.getOperand(1).getReg();
3528 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
3533 selectBoolToInt(SrcReg, TmpType,
I.getOperand(1).getReg(),
I,
false);
3535 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
3538bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
3539 SPIRVTypeInst ResType, MachineInstr &
I,
3540 bool IsSigned)
const {
3541 Register SrcReg =
I.getOperand(1).getReg();
3543 return selectBoolToInt(ResVReg, ResType,
I.getOperand(1).getReg(),
I,
3547 if (ResType == SrcType)
3548 return BuildCOPY(ResVReg, SrcReg,
I);
3550 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3551 return selectUnOp(ResVReg, ResType,
I, Opcode);
3554bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
3555 SPIRVTypeInst ResType,
3557 bool IsSigned)
const {
3558 MachineIRBuilder MIRBuilder(
I);
3559 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
3574 TII.get(IsSigned ? SPIRV::OpSLessThanEqual : SPIRV::OpULessThanEqual))
3577 .
addUse(
I.getOperand(1).getReg())
3578 .
addUse(
I.getOperand(2).getReg())
3584 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
3587 .
addUse(
I.getOperand(1).getReg())
3588 .
addUse(
I.getOperand(2).getReg())
3596 unsigned SelectOpcode =
3597 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
3602 .
addUse(buildOnesVal(
true, ResType,
I))
3603 .
addUse(buildZerosVal(ResType,
I))
3610 .
addUse(buildOnesVal(
false, ResType,
I))
3615bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
3618 SPIRVTypeInst IntTy,
3619 SPIRVTypeInst BoolTy)
const {
3622 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
3623 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
3625 Register One = buildOnesVal(
false, IntTy,
I);
3633 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpINotEqual))
3642bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
3643 SPIRVTypeInst ResType,
3644 MachineInstr &
I)
const {
3645 Register IntReg =
I.getOperand(1).getReg();
3648 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
3649 if (ArgType == ResType)
3650 return BuildCOPY(ResVReg, IntReg,
I);
3652 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3653 return selectUnOp(ResVReg, ResType,
I, Opcode);
3656bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
3657 SPIRVTypeInst ResType,
3658 MachineInstr &
I)
const {
3659 unsigned Opcode =
I.getOpcode();
3660 unsigned TpOpcode = ResType->
getOpcode();
3662 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
3663 assert(Opcode == TargetOpcode::G_CONSTANT &&
3664 I.getOperand(1).getCImm()->isZero());
3665 MachineBasicBlock &DepMBB =
I.getMF()->front();
3668 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
3675 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
3678bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
3679 SPIRVTypeInst ResType,
3680 MachineInstr &
I)
const {
3681 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3688bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
3689 SPIRVTypeInst ResType,
3690 MachineInstr &
I)
const {
3692 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
3696 .
addUse(
I.getOperand(3).getReg())
3698 .
addUse(
I.getOperand(2).getReg());
3699 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
3705bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
3706 SPIRVTypeInst ResType,
3707 MachineInstr &
I)
const {
3708 Type *MaybeResTy =
nullptr;
3713 "Expected aggregate type for extractv instruction");
3715 SPIRV::AccessQualifier::ReadWrite,
false);
3719 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
3722 .
addUse(
I.getOperand(2).getReg());
3723 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
3729bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
3730 SPIRVTypeInst ResType,
3731 MachineInstr &
I)
const {
3732 if (
getImm(
I.getOperand(4), MRI))
3733 return selectInsertVal(ResVReg, ResType,
I);
3735 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
3738 .
addUse(
I.getOperand(2).getReg())
3739 .
addUse(
I.getOperand(3).getReg())
3740 .
addUse(
I.getOperand(4).getReg())
3745bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
3746 SPIRVTypeInst ResType,
3747 MachineInstr &
I)
const {
3748 if (
getImm(
I.getOperand(3), MRI))
3749 return selectExtractVal(ResVReg, ResType,
I);
3751 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
3754 .
addUse(
I.getOperand(2).getReg())
3755 .
addUse(
I.getOperand(3).getReg())
3760bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
3761 SPIRVTypeInst ResType,
3762 MachineInstr &
I)
const {
3763 const bool IsGEPInBounds =
I.getOperand(2).getImm();
3769 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3770 : SPIRV::OpAccessChain)
3771 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
3772 :
SPIRV::OpPtrAccessChain);
3774 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3778 .
addUse(
I.getOperand(3).getReg());
3780 (Opcode == SPIRV::OpPtrAccessChain ||
3781 Opcode == SPIRV::OpInBoundsPtrAccessChain ||
3782 (
getImm(
I.getOperand(4), MRI) &&
foldImm(
I.getOperand(4), MRI) == 0)) &&
3783 "Cannot translate GEP to OpAccessChain. First index must be 0.");
3786 const unsigned StartingIndex =
3787 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3790 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
3791 Res.addUse(
I.getOperand(i).getReg());
3792 Res.constrainAllUses(
TII,
TRI, RBI);
3797bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3799 unsigned Lim =
I.getNumExplicitOperands();
3800 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3801 Register OpReg =
I.getOperand(i).getReg();
3802 MachineInstr *OpDefine = MRI->
getVRegDef(OpReg);
3804 if (!OpDefine || !OpType ||
isConstReg(MRI, OpDefine) ||
3805 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3806 OpDefine->
getOpcode() == TargetOpcode::G_INTTOPTR ||
3813 MachineFunction *MF =
I.getMF();
3825 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3826 TII.get(SPIRV::OpSpecConstantOp))
3829 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
3831 GR.
add(OpDefine, MIB);
3837bool SPIRVInstructionSelector::selectDerivativeInst(
3838 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
3839 const unsigned DPdOpCode)
const {
3842 errorIfInstrOutsideShader(
I);
3847 Register SrcReg =
I.getOperand(2).getReg();
3852 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DPdOpCode))
3855 .
addUse(
I.getOperand(2).getReg());
3857 MachineIRBuilder MIRBuilder(
I);
3860 if (componentCount != 1)
3864 const TargetRegisterClass *RegClass = GR.
getRegClass(SrcType);
3868 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpFConvert))
3873 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DPdOpCode))
3878 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpFConvert))
3886bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
3887 SPIRVTypeInst ResType,
3888 MachineInstr &
I)
const {
3892 case Intrinsic::spv_load:
3893 return selectLoad(ResVReg, ResType,
I);
3894 case Intrinsic::spv_store:
3895 return selectStore(
I);
3896 case Intrinsic::spv_extractv:
3897 return selectExtractVal(ResVReg, ResType,
I);
3898 case Intrinsic::spv_insertv:
3899 return selectInsertVal(ResVReg, ResType,
I);
3900 case Intrinsic::spv_extractelt:
3901 return selectExtractElt(ResVReg, ResType,
I);
3902 case Intrinsic::spv_insertelt:
3903 return selectInsertElt(ResVReg, ResType,
I);
3904 case Intrinsic::spv_gep:
3905 return selectGEP(ResVReg, ResType,
I);
3906 case Intrinsic::spv_bitcast: {
3907 Register OpReg =
I.getOperand(2).getReg();
3908 SPIRVTypeInst OpType =
3912 return selectOpWithSrcs(ResVReg, ResType,
I, {OpReg}, SPIRV::OpBitcast);
3914 case Intrinsic::spv_unref_global:
3915 case Intrinsic::spv_init_global: {
3916 MachineInstr *
MI = MRI->
getVRegDef(
I.getOperand(1).getReg());
3921 Register GVarVReg =
MI->getOperand(0).getReg();
3922 if (!selectGlobalValue(GVarVReg, *
MI, Init))
3927 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3929 MI->eraseFromParent();
3933 case Intrinsic::spv_undef: {
3934 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3940 case Intrinsic::spv_const_composite: {
3942 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3948 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3950 MachineIRBuilder MIR(
I);
3952 MIR, SPIRV::OpConstantComposite, 3,
3953 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3955 for (
auto *Instr : Instructions) {
3956 Instr->setDebugLoc(
I.getDebugLoc());
3961 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3968 case Intrinsic::spv_assign_name: {
3969 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3970 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3971 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3972 i <
I.getNumExplicitOperands(); ++i) {
3973 MIB.
addImm(
I.getOperand(i).getImm());
3978 case Intrinsic::spv_switch: {
3979 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3980 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3981 if (
I.getOperand(i).isReg())
3982 MIB.
addReg(
I.getOperand(i).getReg());
3983 else if (
I.getOperand(i).isCImm())
3984 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3985 else if (
I.getOperand(i).isMBB())
3986 MIB.
addMBB(
I.getOperand(i).getMBB());
3993 case Intrinsic::spv_loop_merge: {
3994 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3995 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3996 if (
I.getOperand(i).isMBB())
3997 MIB.
addMBB(
I.getOperand(i).getMBB());
4004 case Intrinsic::spv_loop_control_intel: {
4006 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopControlINTEL));
4007 for (
unsigned J = 1; J <
I.getNumExplicitOperands(); ++J)
4012 case Intrinsic::spv_selection_merge: {
4014 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
4015 assert(
I.getOperand(1).isMBB() &&
4016 "operand 1 to spv_selection_merge must be a basic block");
4017 MIB.
addMBB(
I.getOperand(1).getMBB());
4018 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
4022 case Intrinsic::spv_cmpxchg:
4023 return selectAtomicCmpXchg(ResVReg, ResType,
I);
4024 case Intrinsic::spv_unreachable:
4025 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
4028 case Intrinsic::spv_alloca:
4029 return selectFrameIndex(ResVReg, ResType,
I);
4030 case Intrinsic::spv_alloca_array:
4031 return selectAllocaArray(ResVReg, ResType,
I);
4032 case Intrinsic::spv_assume:
4034 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
4035 .
addUse(
I.getOperand(1).getReg())
4040 case Intrinsic::spv_expect:
4042 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
4045 .
addUse(
I.getOperand(2).getReg())
4046 .
addUse(
I.getOperand(3).getReg())
4051 case Intrinsic::arithmetic_fence:
4052 if (STI.
canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence)) {
4053 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpArithmeticFenceEXT))
4056 .
addUse(
I.getOperand(2).getReg())
4060 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
4062 case Intrinsic::spv_thread_id:
4068 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
4070 case Intrinsic::spv_thread_id_in_group:
4076 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
4078 case Intrinsic::spv_group_id:
4084 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
4086 case Intrinsic::spv_flattened_thread_id_in_group:
4093 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
4095 case Intrinsic::spv_workgroup_size:
4096 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
4098 case Intrinsic::spv_global_size:
4099 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
4101 case Intrinsic::spv_global_offset:
4102 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
4104 case Intrinsic::spv_num_workgroups:
4105 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
4107 case Intrinsic::spv_subgroup_size:
4108 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
4110 case Intrinsic::spv_num_subgroups:
4111 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
4113 case Intrinsic::spv_subgroup_id:
4114 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
4115 case Intrinsic::spv_subgroup_local_invocation_id:
4116 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
4117 ResVReg, ResType,
I);
4118 case Intrinsic::spv_subgroup_max_size:
4119 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
4121 case Intrinsic::spv_fdot:
4122 return selectFloatDot(ResVReg, ResType,
I);
4123 case Intrinsic::spv_udot:
4124 case Intrinsic::spv_sdot:
4125 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
4127 return selectIntegerDot(ResVReg, ResType,
I,
4128 IID == Intrinsic::spv_sdot);
4129 return selectIntegerDotExpansion(ResVReg, ResType,
I);
4130 case Intrinsic::spv_dot4add_i8packed:
4131 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
4133 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
4134 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
4135 case Intrinsic::spv_dot4add_u8packed:
4136 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
4138 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
4139 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
4140 case Intrinsic::spv_all:
4141 return selectAll(ResVReg, ResType,
I);
4142 case Intrinsic::spv_any:
4143 return selectAny(ResVReg, ResType,
I);
4144 case Intrinsic::spv_cross:
4145 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
4146 case Intrinsic::spv_distance:
4147 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
4148 case Intrinsic::spv_lerp:
4149 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
4150 case Intrinsic::spv_length:
4151 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
4152 case Intrinsic::spv_degrees:
4153 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
4154 case Intrinsic::spv_faceforward:
4155 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
4156 case Intrinsic::spv_frac:
4157 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
4158 case Intrinsic::spv_isinf:
4159 return selectOpIsInf(ResVReg, ResType,
I);
4160 case Intrinsic::spv_isnan:
4161 return selectOpIsNan(ResVReg, ResType,
I);
4162 case Intrinsic::spv_normalize:
4163 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
4164 case Intrinsic::spv_refract:
4165 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
4166 case Intrinsic::spv_reflect:
4167 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
4168 case Intrinsic::spv_rsqrt:
4169 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
4170 case Intrinsic::spv_sign:
4171 return selectSign(ResVReg, ResType,
I);
4172 case Intrinsic::spv_smoothstep:
4173 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
4174 case Intrinsic::spv_firstbituhigh:
4175 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
4176 case Intrinsic::spv_firstbitshigh:
4177 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
4178 case Intrinsic::spv_firstbitlow:
4179 return selectFirstBitLow(ResVReg, ResType,
I);
4180 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
4182 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
4183 Register ScopeReg = buildI32Constant(SPIRV::Scope::Workgroup,
I);
4185 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
4192 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
4193 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
4194 SPIRV::StorageClass::StorageClass ResSC =
4198 "Generic storage class");
4199 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpGenericCastToPtrExplicit))
4207 case Intrinsic::spv_lifetime_start:
4208 case Intrinsic::spv_lifetime_end: {
4209 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
4210 : SPIRV::OpLifetimeStop;
4211 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
4212 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
4221 case Intrinsic::spv_saturate:
4222 return selectSaturate(ResVReg, ResType,
I);
4223 case Intrinsic::spv_nclamp:
4224 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
4225 case Intrinsic::spv_uclamp:
4226 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
4227 case Intrinsic::spv_sclamp:
4228 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
4229 case Intrinsic::spv_subgroup_prefix_bit_count:
4230 return selectWavePrefixBitCount(ResVReg, ResType,
I);
4231 case Intrinsic::spv_wave_active_countbits:
4232 return selectWaveActiveCountBits(ResVReg, ResType,
I);
4233 case Intrinsic::spv_wave_all_equal:
4234 return selectWaveActiveAllEqual(ResVReg, ResType,
I);
4235 case Intrinsic::spv_wave_all:
4236 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
4237 case Intrinsic::spv_wave_any:
4238 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
4239 case Intrinsic::spv_subgroup_ballot:
4240 return selectWaveOpInst(ResVReg, ResType,
I,
4241 SPIRV::OpGroupNonUniformBallot);
4242 case Intrinsic::spv_wave_is_first_lane:
4243 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
4244 case Intrinsic::spv_wave_reduce_umax:
4245 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
4246 case Intrinsic::spv_wave_reduce_max:
4247 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
4248 case Intrinsic::spv_wave_reduce_umin:
4249 return selectWaveReduceMin(ResVReg, ResType,
I,
true);
4250 case Intrinsic::spv_wave_reduce_min:
4251 return selectWaveReduceMin(ResVReg, ResType,
I,
false);
4252 case Intrinsic::spv_wave_reduce_sum:
4253 return selectWaveReduceSum(ResVReg, ResType,
I);
4254 case Intrinsic::spv_wave_product:
4255 return selectWaveReduceProduct(ResVReg, ResType,
I);
4256 case Intrinsic::spv_wave_readlane:
4257 return selectWaveOpInst(ResVReg, ResType,
I,
4258 SPIRV::OpGroupNonUniformShuffle);
4259 case Intrinsic::spv_wave_prefix_sum:
4260 return selectWaveExclusiveScanSum(ResVReg, ResType,
I);
4261 case Intrinsic::spv_wave_prefix_product:
4262 return selectWaveExclusiveScanProduct(ResVReg, ResType,
I);
4263 case Intrinsic::spv_step:
4264 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
4265 case Intrinsic::spv_radians:
4266 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
4270 case Intrinsic::instrprof_increment:
4271 case Intrinsic::instrprof_increment_step:
4272 case Intrinsic::instrprof_value_profile:
4275 case Intrinsic::spv_value_md:
4277 case Intrinsic::spv_resource_handlefrombinding: {
4278 return selectHandleFromBinding(ResVReg, ResType,
I);
4280 case Intrinsic::spv_resource_counterhandlefrombinding:
4281 return selectCounterHandleFromBinding(ResVReg, ResType,
I);
4282 case Intrinsic::spv_resource_updatecounter:
4283 return selectUpdateCounter(ResVReg, ResType,
I);
4284 case Intrinsic::spv_resource_store_typedbuffer: {
4285 return selectImageWriteIntrinsic(
I);
4287 case Intrinsic::spv_resource_load_typedbuffer: {
4288 return selectReadImageIntrinsic(ResVReg, ResType,
I);
4290 case Intrinsic::spv_resource_sample:
4291 case Intrinsic::spv_resource_sample_clamp:
4292 return selectSampleBasicIntrinsic(ResVReg, ResType,
I);
4293 case Intrinsic::spv_resource_samplebias:
4294 case Intrinsic::spv_resource_samplebias_clamp:
4295 return selectSampleBiasIntrinsic(ResVReg, ResType,
I);
4296 case Intrinsic::spv_resource_samplegrad:
4297 case Intrinsic::spv_resource_samplegrad_clamp:
4298 return selectSampleGradIntrinsic(ResVReg, ResType,
I);
4299 case Intrinsic::spv_resource_samplelevel:
4300 return selectSampleLevelIntrinsic(ResVReg, ResType,
I);
4301 case Intrinsic::spv_resource_samplecmp:
4302 case Intrinsic::spv_resource_samplecmp_clamp:
4303 return selectSampleCmpIntrinsic(ResVReg, ResType,
I);
4304 case Intrinsic::spv_resource_samplecmplevelzero:
4305 return selectSampleCmpLevelZeroIntrinsic(ResVReg, ResType,
I);
4306 case Intrinsic::spv_resource_gather:
4307 case Intrinsic::spv_resource_gather_cmp:
4308 return selectGatherIntrinsic(ResVReg, ResType,
I);
4309 case Intrinsic::spv_resource_getpointer: {
4310 return selectResourceGetPointer(ResVReg, ResType,
I);
4312 case Intrinsic::spv_pushconstant_getpointer: {
4313 return selectPushConstantGetPointer(ResVReg, ResType,
I);
4315 case Intrinsic::spv_discard: {
4316 return selectDiscard(ResVReg, ResType,
I);
4318 case Intrinsic::spv_resource_nonuniformindex: {
4319 return selectResourceNonUniformIndex(ResVReg, ResType,
I);
4321 case Intrinsic::spv_unpackhalf2x16: {
4322 return selectExtInst(ResVReg, ResType,
I, GL::UnpackHalf2x16);
4324 case Intrinsic::spv_packhalf2x16: {
4325 return selectExtInst(ResVReg, ResType,
I, GL::PackHalf2x16);
4327 case Intrinsic::spv_ddx:
4328 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdx);
4329 case Intrinsic::spv_ddy:
4330 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdy);
4331 case Intrinsic::spv_ddx_coarse:
4332 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdxCoarse);
4333 case Intrinsic::spv_ddy_coarse:
4334 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdyCoarse);
4335 case Intrinsic::spv_ddx_fine:
4336 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdxFine);
4337 case Intrinsic::spv_ddy_fine:
4338 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdyFine);
4339 case Intrinsic::spv_fwidth:
4340 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpFwidth);
4342 std::string DiagMsg;
4343 raw_string_ostream OS(DiagMsg);
4345 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
4352bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
4353 SPIRVTypeInst ResType,
4354 MachineInstr &
I)
const {
4357 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
4364bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
4365 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4367 assert(Intr.getIntrinsicID() ==
4368 Intrinsic::spv_resource_counterhandlefrombinding);
4371 Register MainHandleReg = Intr.getOperand(2).getReg();
4373 assert(MainHandleDef->getIntrinsicID() ==
4374 Intrinsic::spv_resource_handlefrombinding);
4378 uint32_t ArraySize =
getIConstVal(MainHandleDef->getOperand(4).getReg(), MRI);
4379 Register IndexReg = MainHandleDef->getOperand(5).getReg();
4380 std::string CounterName =
4385 MachineIRBuilder MIRBuilder(
I);
4387 buildPointerToResource(SPIRVTypeInst(GR.
getPointeeType(ResType)),
4389 ArraySize, IndexReg, CounterName, MIRBuilder);
4391 return BuildCOPY(ResVReg, CounterVarReg,
I);
4394bool SPIRVInstructionSelector::selectUpdateCounter(
Register &ResVReg,
4395 SPIRVTypeInst ResType,
4396 MachineInstr &
I)
const {
4398 assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
4400 Register CounterHandleReg = Intr.getOperand(2).getReg();
4401 Register IncrReg = Intr.getOperand(3).getReg();
4408 SPIRVTypeInst CounterVarPointeeType = GR.
getPointeeType(CounterVarType);
4409 assert(CounterVarPointeeType &&
4410 CounterVarPointeeType->
getOpcode() == SPIRV::OpTypeStruct &&
4411 "Counter variable must be a struct");
4413 SPIRV::StorageClass::StorageBuffer &&
4414 "Counter variable must be in the storage buffer storage class");
4416 "Counter variable must have exactly 1 member in the struct");
4417 const SPIRVTypeInst MemberType =
4420 "Counter variable struct must have a single i32 member");
4424 MachineIRBuilder MIRBuilder(
I);
4426 Type::getInt32Ty(
I.getMF()->getFunction().getContext());
4429 LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
4435 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAccessChain))
4438 .
addUse(CounterHandleReg)
4445 Register Semantics = buildI32Constant(SPIRV::MemorySemantics::None,
I);
4448 Register Incr = buildI32Constant(
static_cast<uint32_t
>(IncrVal),
I);
4451 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAtomicIAdd))
4460 return BuildCOPY(ResVReg, AtomicRes,
I);
4468 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
4476bool SPIRVInstructionSelector::selectReadImageIntrinsic(
Register &ResVReg,
4477 SPIRVTypeInst ResType,
4478 MachineInstr &
I)
const {
4486 Register ImageReg =
I.getOperand(2).getReg();
4494 Register IdxReg =
I.getOperand(3).getReg();
4496 MachineInstr &Pos =
I;
4498 return generateImageReadOrFetch(ResVReg, ResType, NewImageReg, IdxReg, Loc,
4502bool SPIRVInstructionSelector::generateSampleImage(
4505 DebugLoc Loc, MachineInstr &Pos)
const {
4516 if (!loadHandleBeforePosition(NewSamplerReg,
4522 MachineIRBuilder MIRBuilder(Pos);
4535 bool IsExplicitLod = ImOps.GradX.has_value() || ImOps.GradY.has_value() ||
4536 ImOps.Lod.has_value();
4537 unsigned Opcode = IsExplicitLod ? SPIRV::OpImageSampleExplicitLod
4538 : SPIRV::OpImageSampleImplicitLod;
4540 Opcode = IsExplicitLod ? SPIRV::OpImageSampleDrefExplicitLod
4541 : SPIRV::OpImageSampleDrefImplicitLod;
4550 MIB.
addUse(*ImOps.Compare);
4552 uint32_t ImageOperands = 0;
4554 ImageOperands |= SPIRV::ImageOperand::Bias;
4556 ImageOperands |= SPIRV::ImageOperand::Lod;
4557 if (ImOps.GradX && ImOps.GradY)
4558 ImageOperands |= SPIRV::ImageOperand::Grad;
4559 if (ImOps.Offset && !isScalarOrVectorIntConstantZero(*ImOps.Offset)) {
4561 ImageOperands |= SPIRV::ImageOperand::ConstOffset;
4564 "Non-constant offsets are not supported in sample instructions.");
4568 ImageOperands |= SPIRV::ImageOperand::MinLod;
4570 if (ImageOperands != 0) {
4571 MIB.
addImm(ImageOperands);
4572 if (ImageOperands & SPIRV::ImageOperand::Bias)
4574 if (ImageOperands & SPIRV::ImageOperand::Lod)
4576 if (ImageOperands & SPIRV::ImageOperand::Grad) {
4577 MIB.
addUse(*ImOps.GradX);
4578 MIB.
addUse(*ImOps.GradY);
4581 (SPIRV::ImageOperand::ConstOffset | SPIRV::ImageOperand::Offset))
4582 MIB.
addUse(*ImOps.Offset);
4583 if (ImageOperands & SPIRV::ImageOperand::MinLod)
4584 MIB.
addUse(*ImOps.MinLod);
4591bool SPIRVInstructionSelector::selectSampleBasicIntrinsic(
4592 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4593 Register ImageReg =
I.getOperand(2).getReg();
4594 Register SamplerReg =
I.getOperand(3).getReg();
4595 Register CoordinateReg =
I.getOperand(4).getReg();
4596 ImageOperands ImOps;
4597 if (
I.getNumOperands() > 5)
4598 ImOps.Offset =
I.getOperand(5).getReg();
4599 if (
I.getNumOperands() > 6)
4600 ImOps.MinLod =
I.getOperand(6).getReg();
4601 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4602 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4605bool SPIRVInstructionSelector::selectSampleBiasIntrinsic(
4606 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4607 Register ImageReg =
I.getOperand(2).getReg();
4608 Register SamplerReg =
I.getOperand(3).getReg();
4609 Register CoordinateReg =
I.getOperand(4).getReg();
4610 ImageOperands ImOps;
4611 ImOps.Bias =
I.getOperand(5).getReg();
4612 if (
I.getNumOperands() > 6)
4613 ImOps.Offset =
I.getOperand(6).getReg();
4614 if (
I.getNumOperands() > 7)
4615 ImOps.MinLod =
I.getOperand(7).getReg();
4616 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4617 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4620bool SPIRVInstructionSelector::selectSampleGradIntrinsic(
4621 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4622 Register ImageReg =
I.getOperand(2).getReg();
4623 Register SamplerReg =
I.getOperand(3).getReg();
4624 Register CoordinateReg =
I.getOperand(4).getReg();
4625 ImageOperands ImOps;
4626 ImOps.GradX =
I.getOperand(5).getReg();
4627 ImOps.GradY =
I.getOperand(6).getReg();
4628 if (
I.getNumOperands() > 7)
4629 ImOps.Offset =
I.getOperand(7).getReg();
4630 if (
I.getNumOperands() > 8)
4631 ImOps.MinLod =
I.getOperand(8).getReg();
4632 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4633 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4636bool SPIRVInstructionSelector::selectSampleLevelIntrinsic(
4637 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4638 Register ImageReg =
I.getOperand(2).getReg();
4639 Register SamplerReg =
I.getOperand(3).getReg();
4640 Register CoordinateReg =
I.getOperand(4).getReg();
4641 ImageOperands ImOps;
4642 ImOps.Lod =
I.getOperand(5).getReg();
4643 if (
I.getNumOperands() > 6)
4644 ImOps.Offset =
I.getOperand(6).getReg();
4645 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4646 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4649bool SPIRVInstructionSelector::selectSampleCmpIntrinsic(
Register &ResVReg,
4650 SPIRVTypeInst ResType,
4651 MachineInstr &
I)
const {
4652 Register ImageReg =
I.getOperand(2).getReg();
4653 Register SamplerReg =
I.getOperand(3).getReg();
4654 Register CoordinateReg =
I.getOperand(4).getReg();
4655 ImageOperands ImOps;
4656 ImOps.Compare =
I.getOperand(5).getReg();
4657 if (
I.getNumOperands() > 6)
4658 ImOps.Offset =
I.getOperand(6).getReg();
4659 if (
I.getNumOperands() > 7)
4660 ImOps.MinLod =
I.getOperand(7).getReg();
4661 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4662 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4665bool SPIRVInstructionSelector::selectSampleCmpLevelZeroIntrinsic(
4666 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4667 Register ImageReg =
I.getOperand(2).getReg();
4668 Register SamplerReg =
I.getOperand(3).getReg();
4669 Register CoordinateReg =
I.getOperand(4).getReg();
4670 ImageOperands ImOps;
4671 ImOps.Compare =
I.getOperand(5).getReg();
4672 if (
I.getNumOperands() > 6)
4673 ImOps.Offset =
I.getOperand(6).getReg();
4676 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4677 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4680bool SPIRVInstructionSelector::selectGatherIntrinsic(
Register &ResVReg,
4681 SPIRVTypeInst ResType,
4682 MachineInstr &
I)
const {
4683 Register ImageReg =
I.getOperand(2).getReg();
4684 Register SamplerReg =
I.getOperand(3).getReg();
4685 Register CoordinateReg =
I.getOperand(4).getReg();
4688 "ImageReg is not an image type.");
4693 ComponentOrCompareReg =
I.getOperand(5).getReg();
4694 OffsetReg =
I.getOperand(6).getReg();
4697 if (!loadHandleBeforePosition(NewImageReg, ImageType, *ImageDef,
I)) {
4701 auto Dim =
static_cast<SPIRV::Dim::Dim
>(ImageType->
getOperand(2).
getImm());
4702 if (Dim != SPIRV::Dim::DIM_2D && Dim != SPIRV::Dim::DIM_Cube &&
4703 Dim != SPIRV::Dim::DIM_Rect) {
4705 "Gather operations are only supported for 2D, Cube, and Rect images.");
4712 if (!loadHandleBeforePosition(
4717 MachineIRBuilder MIRBuilder(
I);
4718 SPIRVTypeInst SampledImageType =
4723 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSampledImage))
4731 bool IsGatherCmp =
IntrId == Intrinsic::spv_resource_gather_cmp;
4733 IsGatherCmp ? SPIRV::OpImageDrefGather : SPIRV::OpImageGather;
4735 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
4740 .
addUse(ComponentOrCompareReg);
4742 uint32_t ImageOperands = 0;
4743 if (OffsetReg && !isScalarOrVectorIntConstantZero(OffsetReg)) {
4744 if (Dim == SPIRV::Dim::DIM_Cube) {
4746 "Gather operations with offset are not supported for Cube images.");
4750 ImageOperands |= SPIRV::ImageOperand::ConstOffset;
4752 ImageOperands |= SPIRV::ImageOperand::Offset;
4756 if (ImageOperands != 0) {
4757 MIB.
addImm(ImageOperands);
4759 (SPIRV::ImageOperand::ConstOffset | SPIRV::ImageOperand::Offset))
4767bool SPIRVInstructionSelector::generateImageReadOrFetch(
4772 "ImageReg is not an image type.");
4774 bool IsSignedInteger =
4779 bool IsFetch = (SampledOp.getImm() == 1);
4782 if (ResultSize == 4) {
4785 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
4791 if (IsSignedInteger)
4797 SPIRVTypeInst ReadType = widenTypeToVec4(ResType, Pos);
4801 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
4806 if (IsSignedInteger)
4810 if (ResultSize == 1) {
4819 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
4822bool SPIRVInstructionSelector::selectResourceGetPointer(
Register &ResVReg,
4823 SPIRVTypeInst ResType,
4824 MachineInstr &
I)
const {
4825 Register ResourcePtr =
I.getOperand(2).getReg();
4827 if (
RegType->getOpcode() == SPIRV::OpTypeImage) {
4836 MachineIRBuilder MIRBuilder(
I);
4838 Register IndexReg =
I.getOperand(3).getReg();
4841 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAccessChain))
4851bool SPIRVInstructionSelector::selectPushConstantGetPointer(
4852 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4857bool SPIRVInstructionSelector::selectResourceNonUniformIndex(
4858 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4859 Register ObjReg =
I.getOperand(2).getReg();
4860 if (!BuildCOPY(ResVReg, ObjReg,
I))
4870 decorateUsesAsNonUniform(ResVReg);
4874void SPIRVInstructionSelector::decorateUsesAsNonUniform(
4877 while (WorkList.
size() > 0) {
4881 bool IsDecorated =
false;
4883 if (
Use.getOpcode() == SPIRV::OpDecorate &&
4884 Use.getOperand(1).getImm() == SPIRV::Decoration::NonUniformEXT) {
4890 if (
Use.getOperand(0).isReg() &&
Use.getOperand(0).isDef()) {
4892 if (ResultReg == CurrentReg)
4900 SPIRV::Decoration::NonUniformEXT, {});
4905bool SPIRVInstructionSelector::extractSubvector(
4907 MachineInstr &InsertionPoint)
const {
4909 [[maybe_unused]] uint64_t InputSize =
4912 assert(InputSize > 1 &&
"The input must be a vector.");
4913 assert(ResultSize > 1 &&
"The result must be a vector.");
4914 assert(ResultSize < InputSize &&
4915 "Cannot extract more element than there are in the input.");
4918 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
4919 for (uint64_t
I = 0;
I < ResultSize;
I++) {
4922 InsertionPoint.
getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4931 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
4933 TII.get(SPIRV::OpCompositeConstruct))
4937 for (
Register ComponentReg : ComponentRegisters)
4938 MIB.
addUse(ComponentReg);
4943bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
4944 MachineInstr &
I)
const {
4951 Register ImageReg =
I.getOperand(1).getReg();
4959 Register CoordinateReg =
I.getOperand(2).getReg();
4960 Register DataReg =
I.getOperand(3).getReg();
4963 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpImageWrite))
4971Register SPIRVInstructionSelector::buildPointerToResource(
4972 SPIRVTypeInst SpirvResType, SPIRV::StorageClass::StorageClass SC,
4973 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
4974 StringRef Name, MachineIRBuilder MIRBuilder)
const {
4976 if (ArraySize == 1) {
4977 SPIRVTypeInst PtrType =
4980 "SpirvResType did not have an explicit layout.");
4985 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
4986 SPIRVTypeInst VarPointerType =
4989 VarPointerType, Set,
Binding, Name, MIRBuilder);
4991 SPIRVTypeInst ResPointerType =
5004bool SPIRVInstructionSelector::selectFirstBitSet16(
5005 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
5006 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
5008 if (!selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
5012 return selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
5015bool SPIRVInstructionSelector::selectFirstBitSet32(
5017 unsigned BitSetOpcode)
const {
5018 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
5021 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
5028bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
5030 unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
5037 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
5039 MachineIRBuilder MIRBuilder(
I);
5042 SPIRVTypeInst I64x2Type =
5044 SPIRVTypeInst Vec2ResType =
5047 std::vector<Register> PartialRegs;
5050 unsigned CurrentComponent = 0;
5051 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
5057 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
5058 TII.get(SPIRV::OpVectorShuffle))
5063 .
addImm(CurrentComponent)
5064 .
addImm(CurrentComponent + 1);
5071 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
5072 BitSetOpcode, SwapPrimarySide))
5075 PartialRegs.push_back(SubVecBitSetReg);
5079 if (CurrentComponent != ComponentCount) {
5085 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
5086 SPIRV::OpVectorExtractDynamic))
5092 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
5093 BitSetOpcode, SwapPrimarySide))
5096 PartialRegs.push_back(FinalElemBitSetReg);
5101 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
5102 SPIRV::OpCompositeConstruct);
5105bool SPIRVInstructionSelector::selectFirstBitSet64(
5107 unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
5120 if (ComponentCount > 2) {
5121 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
5122 BitSetOpcode, SwapPrimarySide);
5126 MachineIRBuilder MIRBuilder(
I);
5128 BaseType, 2 * ComponentCount, MIRBuilder,
false);
5132 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
5138 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
5145 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
5148 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
5149 SPIRV::OpVectorExtractDynamic))
5151 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
5152 SPIRV::OpVectorExtractDynamic))
5156 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
5157 TII.get(SPIRV::OpVectorShuffle))
5165 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
5171 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
5172 TII.get(SPIRV::OpVectorShuffle))
5180 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
5200 SelectOp = SPIRV::OpSelectSISCond;
5201 AddOp = SPIRV::OpIAddS;
5209 SelectOp = SPIRV::OpSelectVIVCond;
5210 AddOp = SPIRV::OpIAddV;
5220 if (SwapPrimarySide) {
5221 PrimaryReg = LowReg;
5222 SecondaryReg = HighReg;
5223 PrimaryShiftReg = Reg0;
5224 SecondaryShiftReg = Reg32;
5229 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
5235 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
5241 if (!selectOpWithSrcs(ValReg, ResType,
I,
5242 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
5245 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
5248bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
5249 SPIRVTypeInst ResType,
5251 bool IsSigned)
const {
5253 Register OpReg =
I.getOperand(2).getReg();
5256 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
5257 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
5261 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
5263 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
5265 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
5269 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
5273bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
5274 SPIRVTypeInst ResType,
5275 MachineInstr &
I)
const {
5277 Register OpReg =
I.getOperand(2).getReg();
5282 unsigned ExtendOpcode = SPIRV::OpUConvert;
5283 unsigned BitSetOpcode = GL::FindILsb;
5287 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
5289 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
5291 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
5298bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
5299 SPIRVTypeInst ResType,
5300 MachineInstr &
I)
const {
5304 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariableLengthArrayINTEL))
5307 .
addUse(
I.getOperand(2).getReg())
5310 unsigned Alignment =
I.getOperand(3).getImm();
5316bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
5317 SPIRVTypeInst ResType,
5318 MachineInstr &
I)
const {
5322 BuildMI(*It->getParent(), It, It->getDebugLoc(),
TII.get(SPIRV::OpVariable))
5325 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
5328 unsigned Alignment =
I.getOperand(2).getImm();
5335bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
5340 const MachineInstr *PrevI =
I.getPrevNode();
5342 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
5346 .
addMBB(
I.getOperand(0).getMBB())
5351 .
addMBB(
I.getOperand(0).getMBB())
5356bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
5367 const MachineInstr *NextI =
I.getNextNode();
5369 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
5375 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
5377 .
addUse(
I.getOperand(0).getReg())
5378 .
addMBB(
I.getOperand(1).getMBB())
5384bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
5385 MachineInstr &
I)
const {
5387 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(TargetOpcode::PHI))
5389 const unsigned NumOps =
I.getNumOperands();
5390 for (
unsigned i = 1; i <
NumOps; i += 2) {
5391 MIB.
addUse(
I.getOperand(i + 0).getReg());
5392 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
5398bool SPIRVInstructionSelector::selectGlobalValue(
5399 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
5401 MachineIRBuilder MIRBuilder(
I);
5402 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
5405 std::string GlobalIdent;
5407 unsigned &
ID = UnnamedGlobalIDs[GV];
5409 ID = UnnamedGlobalIDs.
size();
5410 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
5436 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
5443 MachineRegisterInfo *MRI = MIRBuilder.
getMRI();
5448 MachineInstrBuilder MIB1 =
5449 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
5452 MachineInstrBuilder MIB2 =
5454 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
5458 GR.
add(ConstVal, MIB2);
5466 MachineInstrBuilder MIB3 =
5467 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
5470 GR.
add(ConstVal, MIB3);
5474 assert(NewReg != ResVReg);
5475 return BuildCOPY(ResVReg, NewReg,
I);
5485 const std::optional<SPIRV::LinkageType::LinkageType> LnkType =
5491 SPIRVTypeInst ResType =
5495 GlobalVar->isConstant(), LnkType, MIRBuilder,
true);
5500 if (
GlobalVar->isExternallyInitialized() &&
5501 STI.getTargetTriple().getVendor() ==
Triple::AMD) {
5502 constexpr unsigned ReadWriteINTEL = 3u;
5505 MachineInstrBuilder MIB(*MF, --MIRBuilder.
getInsertPt());
5511bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
5512 SPIRVTypeInst ResType,
5513 MachineInstr &
I)
const {
5515 return selectExtInst(ResVReg, ResType,
I, CL::log10);
5523 MachineIRBuilder MIRBuilder(
I);
5528 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
5531 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
5533 .
add(
I.getOperand(1))
5538 ResType->
getOpcode() == SPIRV::OpTypeFloat);
5540 SPIRVTypeInst SpirvScalarType = ResType->
getOpcode() == SPIRV::OpTypeVector
5548 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
5549 ? SPIRV::OpVectorTimesScalar
5560bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
5561 SPIRVTypeInst ResType,
5562 MachineInstr &
I)
const {
5578 MachineIRBuilder MIRBuilder(
I);
5581 ResType, MIRBuilder, SPIRV::StorageClass::Function);
5593 MachineBasicBlock &EntryBB =
I.getMF()->front();
5597 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
5600 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
5606 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
5609 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
5612 .
add(
I.getOperand(
I.getNumExplicitDefs()))
5616 Register IntegralPartReg =
I.getOperand(1).getReg();
5617 if (IntegralPartReg.
isValid()) {
5619 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
5630 assert(
false &&
"GLSL::Modf is deprecated.");
5641bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
5642 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
5643 SPIRVTypeInst ResType, MachineInstr &
I)
const {
5644 MachineIRBuilder MIRBuilder(
I);
5645 const SPIRVTypeInst Vec3Ty =
5648 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
5660 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
5664 MachineRegisterInfo *MRI = MIRBuilder.
getMRI();
5670 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
5677 assert(
I.getOperand(2).isReg());
5678 const uint32_t ThreadId =
foldImm(
I.getOperand(2), MRI);
5682 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
5693bool SPIRVInstructionSelector::loadBuiltinInputID(
5694 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
5695 SPIRVTypeInst ResType, MachineInstr &
I)
const {
5696 MachineIRBuilder MIRBuilder(
I);
5698 ResType, MIRBuilder, SPIRV::StorageClass::Input);
5713 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
5717 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
5726SPIRVTypeInst SPIRVInstructionSelector::widenTypeToVec4(SPIRVTypeInst
Type,
5727 MachineInstr &
I)
const {
5728 MachineIRBuilder MIRBuilder(
I);
5729 if (
Type->getOpcode() != SPIRV::OpTypeVector)
5733 if (VectorSize == 4)
5741bool SPIRVInstructionSelector::loadHandleBeforePosition(
5742 Register &HandleReg, SPIRVTypeInst ResType, GIntrinsic &HandleDef,
5743 MachineInstr &Pos)
const {
5746 Intrinsic::spv_resource_handlefrombinding);
5754 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
5755 MachineIRBuilder MIRBuilder(HandleDef);
5756 SPIRVTypeInst VarType = ResType;
5757 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
5759 if (IsStructuredBuffer) {
5765 buildPointerToResource(SPIRVTypeInst(VarType), SC, Set,
Binding,
5766 ArraySize, IndexReg, Name, MIRBuilder);
5770 uint32_t LoadOpcode =
5771 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
5781void SPIRVInstructionSelector::errorIfInstrOutsideShader(
5782 MachineInstr &
I)
const {
5784 std::string DiagMsg;
5785 raw_string_ostream OS(DiagMsg);
5786 I.print(OS,
true,
false,
false,
false);
5787 DiagMsg +=
" is only supported in shaders.\n";
5793InstructionSelector *
5797 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
MachineInstrBuilder & UseMI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
LLVMTypeRef LLVMIntType(unsigned NumBits)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static bool intrinsicHasSideEffects(Intrinsic::ID ID)
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static bool isOpcodeWithNoSideEffects(unsigned Opcode)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef)
static unsigned getPtrCmpOpcode(unsigned Pred)
unsigned getVectorSizeOrOne(SPIRVTypeInst Type)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const fltSemantics & IEEEsingle()
static const fltSemantics & IEEEdouble()
static const fltSemantics & IEEEhalf()
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Intrinsic::ID getIntrinsicID() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
void constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void emitGenericError(const Twine &ErrMsg) const
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
use_instr_iterator use_instr_begin(Register RegNo) const
static def_instr_iterator def_instr_end()
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
def_instr_iterator def_instr_begin(Register RegNo) const
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
static use_instr_iterator use_instr_end()
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
const MachineFunction & getMF() const
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
iterator_range< use_instr_iterator > use_instructions(Register Reg) const
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
bool isScalarOrVectorSigned(SPIRVTypeInst Type) const
SPIRVTypeInst getOrCreateOpTypeSampledImage(SPIRVTypeInst ImageType, MachineIRBuilder &MIRBuilder)
void assignSPIRVTypeToVReg(SPIRVTypeInst Type, Register VReg, const MachineFunction &MF)
const TargetRegisterClass * getRegClass(SPIRVTypeInst SpvType) const
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
bool isAggregateType(SPIRVTypeInst Type) const
unsigned getScalarOrVectorBitWidth(SPIRVTypeInst Type) const
SPIRVTypeInst getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
SPIRVTypeInst getOrCreateSPIRVVectorType(SPIRVTypeInst BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
Register buildGlobalVariable(Register Reg, SPIRVTypeInst BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVTypeInst getResultType(Register VReg, MachineFunction *MF=nullptr)
unsigned getScalarOrVectorComponentCount(Register VReg) const
const Type * getTypeForSPIRVType(SPIRVTypeInst Ty) const
bool isBitcastCompatible(SPIRVTypeInst Type1, SPIRVTypeInst Type2) const
unsigned getPointerSize() const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
LLT getRegType(SPIRVTypeInst SpvType) const
void invalidateMachineInstr(MachineInstr *MI)
SPIRVTypeInst getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVTypeInst getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register getSPIRVTypeID(SPIRVTypeInst SpirvType) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII)
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
SPIRVTypeInst retrieveScalarOrVectorIntType(SPIRVTypeInst Type) const
Register getOrCreateGlobalVariableWithBinding(SPIRVTypeInst VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVTypeInst changePointerStorageClass(SPIRVTypeInst PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType=nullptr)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVTypeInst getScalarOrVectorComponentType(SPIRVTypeInst Type) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
SPIRVTypeInst getOrCreateSPIRVFloatType(unsigned BitWidth, MachineInstr &I, const SPIRVInstrInfo &TII)
SPIRVTypeInst getPointeeType(SPIRVTypeInst PtrType)
SPIRVTypeInst getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType)
SPIRVTypeInst getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Type * getDeducedGlobalValueType(const GlobalValue *Global)
Register getOrCreateUndef(MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
bool isAggregateType() const
Return true if the type is an aggregate type.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default -- Visible in the public interface of the translation unit.
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands to the instruction's register class. This could involve inserting COPYs before (for uses) or after (for defs).
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
std::optional< SPIRV::LinkageType::LinkageType > getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
void addStringImm(const StringRef &Str, MCInst &Inst)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have other side effects.