#include "llvm/IR/IntrinsicsNVPTX.h"

#define DEBUG_TYPE "nvptx-lower"

    cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
             " 1: do it 2: do it aggressively"),

        "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),

        "Use IEEE Compliant F32 div.rnd if available (default)"),
        "Use IEEE Compliant F32 div.rnd if available, no FTZ")),

    cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),

    "nvptx-approx-log2f32",
    cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),

  if (Flags.hasApproximateFuncs())

  if (Flags.hasApproximateFuncs())
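// The cl::opt knobs above are precision/performance trade-offs for this
// file's lowering: how aggressively FMA contraction is applied, whether f32
// fdiv uses an IEEE-compliant div.rn (with or without FTZ), whether sqrt
// lowers to sqrt.approx or sqrt.rn, and whether log2 may use lg2.approx.
// The Flags.hasApproximateFuncs() checks additionally honor per-node
// fast-math flags when deciding to use the approximate forms.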
static std::optional<std::pair<unsigned int, MVT>>

    return {{4, MVT::i64}};

  if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
    return {{2, MVT::i64}};

  unsigned PackRegSize;

  if (!CanLowerTo256Bit)

    return std::pair(NumElts, EltVT);

  if (!CanLowerTo256Bit)

  if (!CanLowerTo256Bit)

    return std::pair(NumElts, EltVT);

  const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
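// This helper decides how a wide value is reshaped for PTX registers: i128
// and f128 are handled as two i64 parts, and vectors are grouped into packed
// registers of PackRegSize bits (NPerReg elements each), with the wider
// shapes only used when the subtarget can lower 256-bit vector accesses
// (CanLowerTo256Bit).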
  for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {

    if (VT.getScalarType() == MVT::i8) {
      if (RegisterVT == MVT::i16)
        RegisterVT = MVT::i8;
      else if (RegisterVT == MVT::v2i16)
        RegisterVT = MVT::v2i8;
      else
        assert(RegisterVT == MVT::v4i8 &&
               "Expected v4i8, v2i16, or i16 for i8 RegisterVT");

  for (unsigned I : seq(NumRegs)) {

    if (V.getValueType() == VT) {
      assert(I == 0 && "Index must be 0 for scalar value");

  return GetElement(0);

         "Promotion is not suitable for scalars of size larger than 64-bits");
  if (ParamAlignment < AccessSize)

  if (Offsets[Idx] & (AccessSize - 1))

  EVT EltVT = ValueVTs[Idx];

  if (EltSize >= AccessSize)

  unsigned NumElts = AccessSize / EltSize;

  if (AccessSize != EltSize * NumElts)

  if (Idx + NumElts > ValueVTs.size())

  if (NumElts != 4 && NumElts != 2)

  for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {

    if (ValueVTs[j] != EltVT)

    if (Offsets[j] - Offsets[j - 1] != EltSize)
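// These checks gate merging adjacent parameter pieces into one vector
// access: both the parameter alignment and the element offset must be
// aligned to the access size, the access must cover exactly 2 or 4
// equally-sized elements, and the elements must share a type and be
// contiguous in memory. Any failed check means the element is accessed on
// its own.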
                                 bool IsVAArg = false) {

  const auto GetNumElts = [&](unsigned I) -> unsigned {
    for (const unsigned AccessSize : {16, 8, 4, 2}) {
          I, AccessSize, ValueVTs, Offsets, ParamAlignment);
      assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
             "Unexpected vectorization size");

  for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
    const unsigned NumElts = GetNumElts(I);
    VectorInfo.push_back(NumElts);

  assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
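// Greedy packing: for each starting index, the largest access size from
// {16, 8, 4, 2} bytes that passes the mergeability checks above wins, and
// the resulting element counts (1, 2, or 4) are recorded in VectorInfo so
// the call/return lowering can emit one load or store per group. The final
// accumulate-assert verifies the groups exactly cover all values.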
    bool IsOpSupported = STI.allowFP16Math();

      IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;

      IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;

    bool IsOpSupported = STI.hasNativeBF16Support(Op);
        Op, VT, IsOpSupported ? Action : NoBF16Action);

    bool IsOpSupported = false;

      IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
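// Constructor helpers: f16 and bf16 operations are only enabled when the
// subtarget meets the required SM architecture and PTX ISA version (e.g.
// sm_80/PTX 7.0 or sm_75/PTX 7.0 for certain f16 ops, sm_90/PTX 8.0 for
// others); bf16 ops without native support fall back to NoBF16Action.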
  if (STI.hasF32x2Instructions()) {

  if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)

  if (STI.hasF32x2Instructions())

                     {MVT::v4i8, MVT::v2i32}, Expand);

  for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
                 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
                 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {

                     {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},

  if (STI.hasHWROT32()) {

  for (MVT ValVT : FloatVTs) {
    for (MVT MemVT : FloatVTs) {

  for (MVT ValVT : IntVTs)
    for (MVT MemVT : IntVTs)

                     {MVT::v2i8, MVT::v2i16}, Expand);

    if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)

                     {MVT::i16, MVT::i32, MVT::i64}, Legal);

                     {MVT::v2i16, MVT::v2i32}, Expand);

  if (STI.getPTXVersion() >= 43) {

  if (STI.hasF32x2Instructions())

  if (STI.allowFP16Math() || STI.hasBF16Math())

    if (EltVT == MVT::f32 || EltVT == MVT::f64) {

  for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
    if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {

  const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
                                        STI.getPTXVersion() >= 60 &&

  for (const auto &VT : {MVT::f16, MVT::v2f16})

  if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {

  if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
    for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {

  if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
    for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {

  for (const auto &Op :

  if (STI.getPTXVersion() >= 65) {

  for (const auto &Op :

  bool SupportsF32MinMaxNaN =
      STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;

                     {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
                      MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::v2f32,
                      MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32,
                      MVT::v64f32, MVT::v128f32},

                     {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
                      MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},

                     {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
                                          bool Reciprocal) const {

  if (Reciprocal || ExtraSteps > 0) {
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
                                   : Intrinsic::nvvm_rsqrt_approx_f);
    else if (VT == MVT::f64)
      return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);

      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
                                   : Intrinsic::nvvm_sqrt_approx_f);

        DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
        MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
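// Sqrt estimates: 1/sqrt(x) maps onto rsqrt.approx (with an FTZ variant for
// f32), and sqrt(x) onto sqrt.approx for f32. There is no sqrt.approx.f64,
// so the f64 estimate is instead built by applying rcp.approx.ftz.f64 to
// rsqrt.approx.f64.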
                                   std::optional<unsigned> FirstVAArg,
                                   const CallBase &CB,
                                   unsigned UniqueCallSite) const {

  std::string Prototype;

  O << "prototype_" << UniqueCallSite << " : .callprototype ";

      O << ".param .align " << RetAlign.value() << " .b8 _["
        << DL.getTypeAllocSize(RetTy) << "]";

        size = ITy->getBitWidth();

             "Floating point type expected here");

      O << ".param .b" << size << " _";

      O << ".param .b" << PtrVT.getSizeInBits() << " _";

  const unsigned NumArgs = FirstVAArg.value_or(Args.size());

  for (const unsigned I : llvm::seq(NumArgs)) {
    const auto ArgOuts =
        AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
    AllOuts = AllOuts.drop_front(ArgOuts.size());

    Type *Ty = Args[I].Ty;

    if (ArgOuts[0].Flags.isByVal()) {
      Type *ETy = Args[I].IndirectType;
      Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
      Align ParamByValAlign =

      O << ".param .align " << ParamByValAlign.value() << " .b8 _["
        << ArgOuts[0].Flags.getByValSize() << "]";

        O << ".param .align " << ParamAlign.value() << " .b8 _["
          << DL.getTypeAllocSize(Ty) << "]";

              (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
             "type mismatch between callee prototype and arguments");

        sz = PtrVT.getSizeInBits();
      else
        sz = Ty->getPrimitiveSizeInBits();

      O << ".param .b" << sz << " _";

    O << (first ? "" : ",") << " .param .align "
      << STI.getMaxRequiredAlignment() << " .b8 _[]";
    return DL.getABITypeAlign(Ty);

  if (!DirectCallee) {

    return StackAlign.value();

  return DL.getABITypeAlign(Ty);

  const EVT ActualVT = V.getValueType();
  assert((ActualVT == ExpectedVT ||
         "Non-integer argument type size mismatch");
  if (ExpectedVT.bitsGT(ActualVT))

  if (ExpectedVT.bitsLT(ActualVT))

  if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))
        "Support for variadic functions (unsized array parameter) introduced "
        "in PTX ISA version 6.0 and requires target sm_30.");
  const auto GetI32 = [&](const unsigned I) {

  const unsigned UniqueCallSite = GlobalUniqueCallSite++;

  const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {

        DAG.getNode(NVPTXISD::DeclareScalarParam, dl, {MVT::Other, MVT::Glue},
                    {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});

        NVPTXISD::DeclareArrayParam, dl, {MVT::Other, MVT::Glue},
        {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});

         "Non-VarArg function with extra arguments");

  unsigned VAOffset = 0;

  const SDValue VADeclareParam =
      CLI.Args.size() > FirstVAArg
          ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
                                  Align(STI.getMaxRequiredAlignment()), 0)

  assert(AllOuts.size() == AllOutVals.size() &&
         "Outs and OutVals must be the same size");

    const auto ArgI = E.index();
    const auto Arg = E.value();
    const auto ArgOuts =
        AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });
    const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
    AllOuts = AllOuts.drop_front(ArgOuts.size());
    AllOutVals = AllOutVals.drop_front(ArgOuts.size());

    const bool IsVAArg = (ArgI >= FirstVAArg);
    const bool IsByVal = Arg.IsByVal;

        getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);

    assert((!IsByVal || Arg.IndirectType) &&
           "byval arg must have indirect type");
    Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);

    const Align ArgAlign = [&]() {
        const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();

    const unsigned TySize = DL.getTypeAllocSize(ETy);
    assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
           "type size mismatch");

    const SDValue ArgDeclare = [&]() {
        return VADeclareParam;

        return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);

      assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
      assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
             "Only int and float types are supported as non-array arguments");

      return MakeDeclareScalarParam(ParamSymbol, TySize);

      assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
      SDValue SrcPtr = ArgOutVals[0];
      const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
      const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();

        VAOffset = alignTo(VAOffset, ArgAlign);

      for (const unsigned NumElts : VI) {

            DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);

        TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);

            DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,

    assert(VTs.size() == Offsets.size() && "Size mismatch");
    assert(VTs.size() == ArgOuts.size() && "Size mismatch");

    const bool ExtendIntegerParam =
        Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;

    const auto GetStoredValue = [&](const unsigned I) {

             "OutVal type should always be legal");

          ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);

    for (const unsigned NumElts : VI) {

               "Vectorization should be disabled for vaargs.");

      const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;

        assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");

      const MaybeAlign CurrentAlign = ExtendIntegerParam

            return GetStoredValue(J + K);

          DAG.getStore(ArgDeclare, dl, Val, Ptr,

    const unsigned ResultSize = DL.getTypeAllocSize(RetTy);

      MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);

      MakeDeclareScalarParam(RetSymbol, ResultSize);

  if (VADeclareParam) {

        VADeclareParam.getOperand(2), GetI32(VAOffset),

                    VADeclareParam->getVTList(), DeclareParamOps);

  const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;

    assert(CalleeFunc != nullptr && "Libcall callee must be set.");

    CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");

        HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,

    const char *ProtoStr = nvTM->getStrPool().save(Proto).data();

        NVPTXISD::CallPrototype, dl, MVT::Other,

    CallPrereqs.push_back(PrototypeDeclare);

  const unsigned NumArgs =

      NVPTXISD::CALL, dl, MVT::Other,
       GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});

  const bool ExtendIntegerRetVal =
      RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;

    for (const unsigned NumElts : VI) {
          ExtendIntegerRetVal ? MaybeAlign(std::nullopt)

          ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);

      for (const unsigned J : llvm::seq(NumElts))

                             UniqueCallSite + 1, SDValue(), dl);

      DAG.getNode(NVPTXISD::ProxyReg, dl, Reg.getValueType(), {CallEnd, Reg});
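// LowerCall in outline: declare each formal as a .param (scalar or array),
// store argument values into param space (grouping adjacent pieces into
// vector stores where the packing above allows), emit a CallPrototype node
// when the call is indirect, issue the CALL, then read the return value back
// out of the return param, wrapping each live-out value in a ProxyReg node.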
  if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
        "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
        "requires target sm_52.",

      DAG.getNode(NVPTXISD::DYNAMIC_STACKALLOC, DL, {LocalVT, MVT::Other},

  if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
        "Support for stackrestore requires PTX ISA version >= 7.3 and target "
    return Op.getOperand(0);

  return DAG.getNode(NVPTXISD::STACKRESTORE, DL, MVT::Other, {Chain, ASC});

  if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
        "Support for stacksave requires PTX ISA version >= 7.3 and target >= "

      DAG.getNode(NVPTXISD::STACKSAVE, DL, {LocalVT, MVT::Other}, Chain);
  unsigned NumOperands = Node->getNumOperands();
  for (unsigned i = 0; i < NumOperands; ++i) {
    EVT VVT = SubOp.getNode()->getValueType(0);
    for (unsigned j = 0; j < NumSubElem; ++j) {

  assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
         Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
  return DAG.getNode(NVPTXISD::PRMT, DL, MVT::i32,

    ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,

  while (Level.size() > 1) {

    unsigned I = 0, E = Level.size();
    for (; I + NumInputs <= E; I += NumInputs) {

    if (ReducedLevel.empty()) {

      assert(OpIdx < Ops.size() && "no smaller operators for reduction");

    Level = ReducedLevel;

  return *Level.begin();
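// Tree reduction: each pass over Level combines as many neighbors as the
// widest available operator takes (NumInputs at a time), stepping down to a
// narrower operator for the leftover tail, and repeats until a single value
// remains.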
  switch (ReductionOpcode) {

static std::optional<unsigned>
  switch (ReductionOpcode) {
    return NVPTXISD::FMAXNUM3;
    return NVPTXISD::FMINNUM3;
    return NVPTXISD::FMAXIMUM3;
    return NVPTXISD::FMINIMUM3;
    return std::nullopt;

  const SDNodeFlags Flags = Op->getFlags();
  const unsigned Opcode = Op->getOpcode();
  const EVT EltTy = Vector.getValueType().getVectorElementType();

  const bool CanUseMinMax3 =
      EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
      STI.getPTXVersion() >= 88 &&

  SmallVector<std::pair<unsigned, unsigned>, 2> ScalarOps;

  if (CanUseMinMax3 && Opcode3Elem)
    ScalarOps.push_back({*Opcode3Elem, 3});
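// The 3-input min/max forms (FMINNUM3/FMAXNUM3/FMINIMUM3/FMAXIMUM3) are only
// available for f32 on sm_100+ with PTX ISA 8.8+, so the reduction lowering
// registers them as an extra (opcode, arity 3) entry ahead of the regular
// 2-input operator.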
  EVT FromVT = Op->getOperand(0)->getValueType(0);
  if (FromVT != MVT::v2i8) {

  EVT ToVT = Op->getValueType(0);

  EVT VT = Op->getValueType(0);

    return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
           isa<ConstantFPSDNode>(Operand);

  if (VT != MVT::v4i8)

                         uint64_t SelectionValue) -> SDValue {

      return getPRMT(L, R, SelectionValue, DL, DAG);

  auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
  auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
  auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);

  auto GetOperand = [](SDValue Op, int N) -> APInt {
    EVT VT = Op->getValueType(0);

      return APInt(32, 0);

    if (VT == MVT::v2f16 || VT == MVT::v2bf16)
    else if (VT == MVT::v2i16 || VT == MVT::v4i8)

    if (VT == MVT::v4i8)

    return Value.zext(32);

  assert(32 % NumElements == 0 && "must evenly divide bit length");
  const unsigned ShiftAmount = 32 / NumElements;
  for (unsigned ElementNo : seq(NumElements))
    Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
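// Non-constant v4i8 BUILD_VECTORs are packed with byte permutes: two PRMTs
// with selector 0x3340 combine operand pairs (0,1) and (2,3), and a final
// PRMT with selector 0x5410 interleaves the partial results into one 32-bit
// value. All-constant vectors bypass PRMT entirely: each element is shifted
// into its lane and OR-ed into a single i32 immediate.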
  EVT VectorVT = Vector.getValueType();

  if (VectorVT == MVT::v4i8) {

  SDLoc dl(Op.getNode());

  EVT VectorVT = Vector.getValueType();

  if (VectorVT != MVT::v4i8)

  if (Value->isUndef())

      DAG.getNode(NVPTXISD::BFI, DL, MVT::i32,

  if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)

  uint32_t Selector = 0;

    if (I.value() != -1)
      Selector |= (I.value() << (I.index() * 4));

  EVT VT = Op.getValueType();

  if (VTBits == 32 && STI.getSmVersion() >= 35) {

        DAG.getNode(NVPTXISD::FSHR_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);

  EVT VT = Op.getValueType();

  if (VTBits == 32 && STI.getSmVersion() >= 35) {

        DAG.getNode(NVPTXISD::FSHL_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
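// v4i8 insert/extract and shuffles are expressed with BFI and PRMT byte
// operations (the shuffle builds a 4-nibble PRMT selector). For 64-bit
// shift-by-parts, sm_35+ combines the two 32-bit halves via the clamping
// funnel-shift nodes FSHR_CLAMP/FSHL_CLAMP, which map onto PTX shf.r/shf.l
// with clamp semantics.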
  EVT VT = Op.getValueType();

  return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2);

  EVT VT = Op.getValueType();

    return LowerFROUND32(Op, DAG);

    return LowerFROUND64(Op, DAG);

  EVT VT = Op.getValueType();

  const unsigned SignBitMask = 0x80000000;

  const unsigned PointFiveInBits = 0x3F000000;
  SDValue PointFiveWithSignRaw =

  EVT VT = Op.getValueType();

  EVT VT = N->getValueType(0);

  assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);

  if (Op.getValueType() == MVT::bf16) {

        DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),

  assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);

  if (Op.getOperand(0).getValueType() == MVT::bf16) {

        Op.getOpcode(), Loc, Op.getValueType(),

  EVT NarrowVT = Op.getValueType();

  if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {

  if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {

  if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {

  EVT WideVT = Op.getValueType();

      (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {

      (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {

  if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {

  if (Op.getValueType() != MVT::v2i16)

  EVT EltVT = Op.getValueType().getVectorElementType();

  for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {

                   [&](const SDUse &O) {
                     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
                                        O.get(), DAG.getIntPtrConstant(I, DL));

                             bool hasOffset = false) {

  if (!Op->getOperand(hasOffset ? 4 : 3).getValueType().isVector())

  for (size_t I = 0; I < N->getNumOperands(); I++) {

  return Tcgen05StNode;

  EVT VT = Op.getValueType();

  return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64,
                     {SwappedHigh, SwappedLow});
  case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
    return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1;
  case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
    return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2;
  case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
    return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
  case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
    return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
    return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1;
  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
    return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2;
  case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
    return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
  case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
    return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
    return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
    return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
  case Intrinsic::
      nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
    return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
  case Intrinsic::
      nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
    return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
  case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
    return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1;
  case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
    return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2;
  case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
    return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
  case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
    return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1;
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2;
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
    return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
  case Intrinsic::
      nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
    return NVPTXISD::
        TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
  case Intrinsic::
      nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
    return NVPTXISD::
        TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
  for (size_t I = 0; I < N->getNumOperands(); I++) {

  return Tcgen05MMANode;

static std::optional<std::pair<SDValue, SDValue>>

  EVT ResVT = N->getValueType(0);

  for (unsigned i = 0; i < NumElts; ++i)

    Ops.push_back(N->getOperand(3));
    Ops.push_back(N->getOperand(4));

    Ops.push_back(N->getOperand(3));

  for (unsigned i = 0; i < NumElts; ++i) {

  return {{BuildVector, Chain}};

    AS = MemN->getAddressSpace();

                         " with value " + Twine(Val) +
                         " is not supported on the given target.",

  return Op.getOperand(0);

  unsigned Val = N->getConstantOperandVal(3);

  unsigned Val = N->getConstantOperandVal(3);
  case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
  case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
  case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
  case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
  case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
  case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
  case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
  case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
  case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
  case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
  case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
  case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
  case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
  case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
  case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
  case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
  case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
  case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
  case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
  case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
  case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
  case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
  case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
  case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
  case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
  case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
  case Intrinsic::nvvm_tcgen05_st_32x32b_x128:

  case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
  case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
  case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
  case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
  case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
  case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
  case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:

  case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
  case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
  case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
  case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
  case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
  case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
  case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
  case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
  case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
  case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
  case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
  case Intrinsic::
      nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
  case Intrinsic::
      nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
  case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
  case Intrinsic::
      nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
  case Intrinsic::
      nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:

  case Intrinsic::nvvm_tensormap_replace_elemtype:
  case Intrinsic::nvvm_tensormap_replace_swizzle_mode:
  if (N->getOperand(1).getValueType() != MVT::i128) {

  auto Opcode = [&]() {
    case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
      return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED;
    case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
      return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X;
    case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
      return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y;
    case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
      return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z;

  SDValue TryCancelResponse = N->getOperand(1);

  return DAG.getNode(Opcode, DL, N->getVTList(),
                     {TryCancelResponse0, TryCancelResponse1});

  unsigned IntrinsicID = N->getConstantOperandVal(0);

  for (unsigned i = 0; i < 4; ++i)

  auto [OpCode, RetTy, CvtModeFlag] =
      [&]() -> std::tuple<unsigned, MVT::SimpleValueType, uint32_t> {
    switch (IntrinsicID) {
    case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
      return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,
              CvtMode::RS | CvtMode::RELU_FLAG};
    case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
      return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
    case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
      return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,
              CvtMode::RS | CvtMode::RELU_FLAG};
    case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
      return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
    case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
      return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,
              CvtMode::RS | CvtMode::RELU_FLAG};
    case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
      return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
    case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
      return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,
              CvtMode::RS | CvtMode::RELU_FLAG};
    case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
      return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
    case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
      return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,
              CvtMode::RS | CvtMode::RELU_FLAG};
    case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
      return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};

  Ops.push_back(RBits);

  const unsigned Mode = [&]() {
    switch (Op->getConstantOperandVal(0)) {
    case Intrinsic::nvvm_prmt:
    case Intrinsic::nvvm_prmt_b4e:
    case Intrinsic::nvvm_prmt_ecl:
    case Intrinsic::nvvm_prmt_ecr:
    case Intrinsic::nvvm_prmt_f4e:
    case Intrinsic::nvvm_prmt_rc16:
    case Intrinsic::nvvm_prmt_rc8:

  SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)

  SDValue Selector = (Op->op_end() - 1)->get();

#define TCGEN05_LD_RED_INTR(SHAPE, NUM, TYPE)                                  \
  Intrinsic::nvvm_tcgen05_ld_red_##SHAPE##_x##NUM##_##TYPE

#define TCGEN05_LD_RED_INST(SHAPE, NUM, TYPE)                                  \
  NVPTXISD::TCGEN05_LD_RED_##SHAPE##_X##NUM##_##TYPE
static std::optional<std::tuple<SDValue, SDValue, SDValue>>

  EVT ResVT = N->getValueType(0);

  for (unsigned i = 2; i < N->getNumOperands(); i++)
    Ops.push_back(N->getOperand(i));

  for (unsigned i = 0; i < NumElts; ++i) {

  return {{BuildVector, RedResult, Chain}};

  switch (Op->getConstantOperandVal(1)) {

  case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
  case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
  case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:

  case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:

  case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_f32:
  case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_i32:
  case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_f32:
  case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_i32:

        {std::get<0>(*Res), std::get<1>(*Res), std::get<2>(*Res)}, SDLoc(Op));

  switch (Op->getConstantOperandVal(0)) {

  case Intrinsic::nvvm_prmt:
  case Intrinsic::nvvm_prmt_b4e:
  case Intrinsic::nvvm_prmt_ecl:
  case Intrinsic::nvvm_prmt_ecr:
  case Intrinsic::nvvm_prmt_f4e:
  case Intrinsic::nvvm_prmt_rc16:
  case Intrinsic::nvvm_prmt_rc8:

  case Intrinsic::nvvm_internal_addrspace_wrap:
    return Op.getOperand(1);

  case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
  case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
  case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
  case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:

  case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
  case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
  case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
  case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
  case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
  case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
  case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
  case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
  case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
  case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
  assert(V.getValueType() == MVT::i64 &&
         "Unexpected CTLZ/CTPOP type to legalize");

  assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);

  const auto Amt = AmtConst->getZExtValue() & 63;

                               ? std::make_tuple(AHi, ALo, BHi)
                               : std::make_tuple(ALo, BHi, BLo);

  return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64, {RLo, RHi});

  EVT Ty = Op.getValueType();

  if (Flags.hasNoInfs())

  assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");

    TrueVal = TrueVal.getOperand(0);
    FalseVal = FalseVal.getOperand(0);

  EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
               ? TrueVal.getValueType()
               : FalseVal.getValueType();
  SDValue BasePtr = N->getOperand(2);

  assert(ValVT.isVector() && "Masked vector store must have vector type");

         "Unexpected alignment for masked store");

  unsigned Opcode = 0;

  Ops.push_back(Chain);

  assert(Mask.getValueType().isVector() &&
         Mask.getValueType().getVectorElementType() == MVT::i1 &&
         "Mask must be a vector of i1");

         "Mask expected to be a BUILD_VECTOR");
  assert(Mask.getValueType().getVectorNumElements() ==
         "Mask size must be the same as the vector size");

    if (Op.getNode()->getAsZExtVal() == 0) {

    Ops.push_back(ExtVal);

  Ops.push_back(BasePtr);

         "Offset operand expected to be undef");
  switch (Op.getOpcode()) {

    return LowerADDRSPACECAST(Op, DAG);

    return LowerBUILD_VECTOR(Op, DAG);

    return LowerBITCAST(Op, DAG);

    return LowerEXTRACT_VECTOR_ELT(Op, DAG);

    return LowerINSERT_VECTOR_ELT(Op, DAG);

    return LowerVECTOR_SHUFFLE(Op, DAG);

    return LowerCONCAT_VECTORS(Op, DAG);

    return LowerVECREDUCE(Op, DAG);

    return LowerSTORE(Op, DAG);

    assert(STI.has256BitVectorLoadStore(
           "Masked store vector not supported on subtarget.");

    return LowerLOAD(Op, DAG);

    return LowerMLOAD(Op, DAG);

    return LowerShiftLeftParts(Op, DAG);

    return LowerShiftRightParts(Op, DAG);

    return LowerFROUND(Op, DAG);

    return LowerFCOPYSIGN(Op, DAG);

    return LowerINT_TO_FP(Op, DAG);

    return LowerFP_TO_INT(Op, DAG);

    return LowerFP_ROUND(Op, DAG);

    return LowerFP_EXTEND(Op, DAG);

    return LowerVAARG(Op, DAG);

    return LowerVASTART(Op, DAG);

    return LowerCopyToReg_128(Op, DAG);

    return PromoteBinOpIfF32FTZ(Op, DAG);
  unsigned SrcAS = N->getSrcAddressSpace();
  unsigned DestAS = N->getDestAddressSpace();

  const MVT GenerictVT =

    SDValue SharedClusterConversion =

      return SharedClusterConversion;

  SDNode *Node = Op.getNode();

  EVT VT = Node->getValueType(0);

  const MaybeAlign MA(Node->getConstantOperandVal(3));

                                 Tmp1, Tmp2, MachinePointerInfo(V));

                       MachinePointerInfo(V));

  return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));

  SDValue VAReg = getParamSymbol(DAG, -1, PtrVT);

  return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
                      MachinePointerInfo(SV));
static std::pair<MemSDNode *, uint32_t>

  SDValue BasePtr = N->getOperand(1);

  [[maybe_unused]] SDValue Passthru = N->getOperand(4);

  EVT ResVT = N->getValueType(0);
  assert(ResVT.isVector() && "Masked vector load must have vector type");

         "Passthru operand expected to be poison or undef");

  assert(ElementSizeInBits % 8 == 0 && "Unexpected element size");
  uint32_t ElementSizeInBytes = ElementSizeInBits / 8;
  uint32_t ElementMask = (1u << ElementSizeInBytes) - 1u;

    UsedBytesMask <<= ElementSizeInBytes;

    if (Op->getAsZExtVal() != 0)
      UsedBytesMask |= ElementMask;

  assert(UsedBytesMask != 0 && UsedBytesMask != UINT32_MAX &&
         "Unexpected masked load with elements masked all on or all off");

    UsedBytesMask = UINT32_MAX;

  return {NewLD, UsedBytesMask};
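// A masked load's per-element i1 mask is collapsed into one 32-bit
// "used bytes" mask: each enabled element contributes ElementSizeInBytes set
// bits at its byte position, and an all-ones mask means the load can be
// treated as an ordinary unmasked load.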
static std::optional<std::pair<SDValue, SDValue>>

  const EVT ResVT = LD->getValueType(0);
  const EVT MemVT = LD->getMemoryVT();

    return std::nullopt;

  const auto NumEltsAndEltVT =
  if (!NumEltsAndEltVT)
    return std::nullopt;
  const auto [NumElts, EltVT] = NumEltsAndEltVT.value();

  Align Alignment = LD->getAlign();

  if (Alignment < PrefAlign) {
    return std::nullopt;

  std::optional<uint32_t> UsedBytesMask = std::nullopt;

    std::tie(LD, UsedBytesMask) =

      return std::nullopt;

  ListVTs.push_back(MVT::Other);

        DAG.getConstant(UsedBytesMask.value_or(UINT32_MAX), DL, MVT::i32));

                                          LD->getMemOperand());

  for (const unsigned I : llvm::seq(NumElts)) {

  for (const unsigned I : llvm::seq(NumElts)) {

    if (LoadEltVT != EltVT)

  const MVT BuildVecVT =

  Results.append({Res->first, Res->second});
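// Vector loads are legalized into a single target load node yielding NumElts
// packed results (per the lowering shape computed earlier), but only when
// the load's alignment reaches the preferred alignment; otherwise the
// generic expansion handles it. The pieces are reassembled with BUILD_VECTOR,
// re-narrowing any elements the load widened.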
  assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");

                              LD->getBasePtr(), LD->getPointerInfo(),
                              MVT::i8, LD->getAlign(),
                              LD->getMemOperand()->getFlags());

  if (Op.getValueType() == MVT::i1)

  assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
         "Unexpected fpext-load");

                        LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
                        LD->getMemOperand());

  EVT VT = Op.getValueType();

  MemSDNode *LD = std::get<0>(Result);
  uint32_t UsedBytesMask = std::get<1>(Result);

  OtherOps.push_back(DAG.getConstant(UsedBytesMask, DL, MVT::i32));

                                 LD->getMemoryVT(), LD->getMemOperand());
  const EVT MemVT = N->getMemoryVT();

  const auto NumEltsAndEltVT =
  if (!NumEltsAndEltVT)
  const auto [NumElts, EltVT] = NumEltsAndEltVT.value();

  Align Alignment = N->getAlign();

  if (Alignment < PrefAlign) {

  Ops.push_back(N->getOperand(0));

  for (const unsigned I : llvm::seq(NumElts)) {
                                   NumEltsPerSubVector);

  for (const unsigned I : llvm::seq(NumElts)) {

    Ops.push_back(ExtVal);

  Ops.append(N->op_begin() + 2, N->op_end());

                          N->getMemoryVT(), N->getMemOperand());
  EVT VT = Store->getMemoryVT();

    return LowerSTOREi1(Op, DAG);

  SDNode *Node = Op.getNode();

      DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
                        ST->getAlign(), ST->getMemOperand()->getFlags());

  assert(Op.getOperand(1).getValueType() == MVT::i128 &&
         "Custom lowering for 128-bit CopyToReg only");

  SDNode *Node = Op.getNode();

  NewOps[0] = Op->getOperand(0);
  NewOps[1] = Op->getOperand(1);

  NewOps[4] = Op->getOperand(3);
unsigned NVPTXTargetLowering::getNumRegisters(
    std::optional<MVT> RegisterVT = std::nullopt) const {
  if (VT == MVT::i128 && RegisterVT == MVT::i128)

bool NVPTXTargetLowering::splitValueIntoRegisterParts(
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (Val.getValueType() == MVT::i128 && NumParts == 1) {

  StringRef SavedStr = nvTM->getStrPool().save(

  const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));
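// i128 is treated as a single register part here: getNumRegisters reports 1
// and splitValueIntoRegisterParts keeps the value whole rather than letting
// generic code split it, so the 128-bit CopyToReg lowering above can handle
// it. Parameter symbol names are interned in the target machine's string
// pool (e.g. "param<I>").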
  for (const auto &Arg : F.args()) {
    const auto ArgIns = AllIns.take_while(
        [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
    AllIns = AllIns.drop_front(ArgIns.size());

    Type *Ty = Arg.getType();

    if (Arg.use_empty()) {

      for (const auto &In : ArgIns) {
        assert(!In.Used && "Arg.use_empty() is true but Arg is used?");

    SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);

    if (Arg.hasByValAttr()) {

      assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
      const auto &ByvalIn = ArgIns[0];
             "Ins type did not match function type");
      assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");

             "grid_constant by NVPTXLowerArgs");

        P.getNode()->setIROrder(Arg.getArgNo() + 1);

        P = DAG.getNode(NVPTXISD::MoveParam, dl, ByvalIn.VT, ArgSymbol);
        P.getNode()->setIROrder(Arg.getArgNo() + 1);

    assert(VTs.size() == ArgIns.size() && "Size mismatch");
    assert(VTs.size() == Offsets.size() && "Size mismatch");

        &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);

    for (const unsigned NumElts : VI) {

      const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];

          DAG.getLoad(VecVT, dl, Root, VecAddr,

      P.getNode()->setIROrder(Arg.getArgNo() + 1);
      for (const unsigned J : llvm::seq(NumElts)) {

  if (!OutChains.empty())
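// LowerFormalArguments materializes each incoming argument by loading from
// its ".param" symbol, using the same grouping of adjacent elements into
// vector loads as the call lowering; byval pointer arguments instead become
// a MoveParam of the symbol itself, and i1 values travel as i8 in param
// space. setIROrder stamps each node with the argument's source position.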
  Type *RetTy = F.getReturnType();

    assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
    return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);

  const bool ExtendIntegerRetVal =
      RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;

  assert(VTs.size() == OutVals.size() && "Bad return value decomposition");

  const auto GetRetVal = [&](unsigned I) -> SDValue {

           "OutVal type should always be legal");

        ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);

  for (const unsigned NumElts : VI) {
    const MaybeAlign CurrentAlign = ExtendIntegerRetVal

        NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });

    Chain = DAG.getStore(Chain, dl, Val, Ptr,

  return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
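// LowerReturn stores the return values into the implicit return parameter
// and ends with RET_GLUE; integer returns narrower than 32 bits are widened
// to i32 and i1 is stored as i8, matching the widening done on the caller
// side when the result is read back.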
  if (Constraint.size() > 1)

  case Intrinsic::nvvm_match_all_sync_i32p:
  case Intrinsic::nvvm_match_all_sync_i64p:
    Info.memVT = MVT::i1;
4269 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
4270 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
4271 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
4272 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
4273 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
4274 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
4275 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
4276 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
4277 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
4278 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
4279 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
4280 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
4281 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
4282 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
4283 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
4284 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
4285 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
4286 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
4287 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
4288 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
4289 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
4290 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
4291 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
4292 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
4294 Info.memVT = MVT::v8f16;
4295 Info.ptrVal =
I.getArgOperand(0);
4298 Info.align =
Align(16);
4302 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
4303 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
4304 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
4305 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
4306 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
4307 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
4308 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
4309 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
4310 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
4311 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
4312 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
4313 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
4314 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
4315 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
4316 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
4317 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
4318 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
4319 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
4320 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
4321 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
4322 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
4323 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
4324 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
4325 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
4327 Info.memVT = MVT::v2i32;
4328 Info.ptrVal =
I.getArgOperand(0);
4331 Info.align =
Align(8);
4336 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
4337 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
4338 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
4339 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
4340 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
4341 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
4342 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
4343 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
4344 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
4345 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
4346 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
4347 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
4348 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
4349 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
4350 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
4351 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
4353 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
4354 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
4355 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
4356 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
4357 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
4358 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
4359 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
4360 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
4361 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
4362 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
4363 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
4364 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
4365 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
4366 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
4367 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
4368 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
4369 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
4370 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
4371 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
4372 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
4373 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
4374 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
4375 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
4377 Info.memVT = MVT::v4i32;
4378 Info.ptrVal =
I.getArgOperand(0);
4381 Info.align =
Align(16);
4386 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
4387 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
4388 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
4389 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
4390 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
4391 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
4392 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
4393 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
4395 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
4396 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
4397 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
4398 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
4399 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
4400 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
4401 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
4402 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
4403 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
4404 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
4405 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
4406 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
4407 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
4408 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
4409 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
4410 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
4411 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
4412 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
4413 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
4414 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
4415 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
4416 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
4417 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
4418 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
4420 Info.memVT = MVT::i32;
4421 Info.ptrVal =
I.getArgOperand(0);
4424 Info.align =
Align(4);
4429 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
4430 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
4431 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
4432 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
4433 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
4434 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
4435 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
4436 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
4437 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
4438 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
4439 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
4440 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
4442 Info.memVT = MVT::v4f16;
4443 Info.ptrVal =
I.getArgOperand(0);
4446 Info.align =
Align(16);
4451 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
4452 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
4453 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
4454 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
4455 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
4456 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
4457 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
4458 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
4459 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
4460 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
4461 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
4462 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
4463 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
4464 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
4465 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
4466 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
4468 Info.memVT = MVT::v8f32;
4469 Info.ptrVal =
I.getArgOperand(0);
4472 Info.align =
Align(16);
4477 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
4478 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
4479 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
4480 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
4482 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
4483 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
4484 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
4485 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
4487 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
4488 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
4489 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
4490 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
4491 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
4492 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
4493 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
4494 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
4495 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
4496 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
4497 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
4498 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
4500 Info.memVT = MVT::v8i32;
4501 Info.ptrVal =
I.getArgOperand(0);
4504 Info.align =
Align(16);
4509 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
4510 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
4511 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
4512 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
4513 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
4514 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
4515 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
4516 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
4517 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
4518 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
4519 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
4520 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
4521 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
4522 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
4523 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
4525 Info.memVT = MVT::v2i32;
4526 Info.ptrVal =
I.getArgOperand(0);
4529 Info.align =
Align(8);
4534 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
4535 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
4536 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
4537 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
4539 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
4540 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
4541 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
4542 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
4544 Info.memVT = MVT::f64;
4545 Info.ptrVal =
I.getArgOperand(0);
4548 Info.align =
Align(8);
4553 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
4554 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
4555 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
4556 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
4558 Info.memVT = MVT::v2f64;
4559 Info.ptrVal =
I.getArgOperand(0);
4562 Info.align =
Align(16);
4567 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
4568 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
4569 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
4570 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
4571 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
4572 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
4573 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
4574 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
4575 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
4576 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
4577 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
4578 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
4580 Info.memVT = MVT::v4f16;
4581 Info.ptrVal =
I.getArgOperand(0);
4584 Info.align =
Align(16);
4589 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
4590 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
4591 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
4592 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
4593 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
4594 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
4595 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
4596 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
4597 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
4598 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
4599 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
4600 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
4601 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
4602 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
4603 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
4604 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
4606 Info.memVT = MVT::v8f32;
4607 Info.ptrVal =
I.getArgOperand(0);
4610 Info.align =
Align(16);
4615 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
4616 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
4617 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
4618 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
4619 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
4620 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
4621 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
4622 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
4623 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
4624 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
4625 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
4626 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
4628 Info.memVT = MVT::v8i32;
4629 Info.ptrVal =
I.getArgOperand(0);
4632 Info.align =
Align(16);
4637 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4638 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4639 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4640 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4641 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4642 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4643 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4644 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4645 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4646 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4647 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
4649 Info.memVT = MVT::v2i32;
4650 Info.ptrVal = I.getArgOperand(0);
4653 Info.align = Align(8);
4658 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4659 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4660 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4661 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
4663 Info.memVT = MVT::v2f64;
4664 Info.ptrVal = I.getArgOperand(0);
4667 Info.align = Align(16);
4672 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4673 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4674 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
4676 Info.memVT = MVT::i32;
4677 Info.ptrVal = I.getArgOperand(0);
4680 Info.align = Align(4);
4685 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4686 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4687 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
4689 Info.memVT = MVT::v4i32;
4690 Info.ptrVal = I.getArgOperand(0);
4693 Info.align = Align(16);
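// Generic-address-space atomics both read and write their location; the
// access width presumably follows the instruction's result type, which is
// why the DataLayout is consulted below.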
4698 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4699 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4700 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4701 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4702 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4703 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4704 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4705 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4706 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4707 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4708 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4709 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4710 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4711 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4712 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4713 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4714 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4715 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4716 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4717 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4718 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4719 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
4720 auto &DL = I.getDataLayout();
4723 Info.ptrVal = I.getArgOperand(0);
4731 case Intrinsic::nvvm_prefetch_tensormap: {
4732 auto &DL = I.getDataLayout();
4735 Info.ptrVal = I.getArgOperand(0);
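// The tensormap.replace intrinsics patch a single descriptor field in
// place: i64 for the global address/stride fields, i32 for the rest.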
4744 case Intrinsic::nvvm_tensormap_replace_global_address:
4745 case Intrinsic::nvvm_tensormap_replace_global_stride: {
4747 Info.memVT = MVT::i64;
4748 Info.ptrVal = I.getArgOperand(0);
4756 case Intrinsic::nvvm_tensormap_replace_rank:
4757 case Intrinsic::nvvm_tensormap_replace_box_dim:
4758 case Intrinsic::nvvm_tensormap_replace_global_dim:
4759 case Intrinsic::nvvm_tensormap_replace_element_stride:
4760 case Intrinsic::nvvm_tensormap_replace_elemtype:
4761 case Intrinsic::nvvm_tensormap_replace_interleave_layout:
4762 case Intrinsic::nvvm_tensormap_replace_swizzle_mode:
4763 case Intrinsic::nvvm_tensormap_replace_swizzle_atomicity:
4764 case Intrinsic::nvvm_tensormap_replace_fill_mode: {
4766 Info.memVT = MVT::i32;
4767 Info.ptrVal = I.getArgOperand(0);
4775 case Intrinsic::nvvm_ldu_global_i:
4776 case Intrinsic::nvvm_ldu_global_f:
4777 case Intrinsic::nvvm_ldu_global_p: {
4780 Info.ptrVal = I.getArgOperand(0);
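// Texture (tex/tld4) fetches return a four-element vector. The sampler is
// not an LLVM pointer, so ptrVal is left null and a conservative 16-byte
// alignment is reported.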
4788 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4789 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4790 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4791 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4792 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4793 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4794 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4795 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4796 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4797 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4798 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4799 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4800 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4801 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4802 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4803 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4804 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4805 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4806 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4807 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4808 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4809 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4810 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4811 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4812 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4813 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4814 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4815 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4816 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4817 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4818 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4819 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4820 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4821 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4822 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4823 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4824 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4825 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4826 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4827 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4828 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4829 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4830 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4831 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4832 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4833 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4834 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4835 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4836 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4837 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4838 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4839 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4840 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4841 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4842 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4843 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4844 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4845 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4847 Info.memVT = MVT::v4f32;
4848 Info.ptrVal = nullptr;
4851 Info.align = Align(16);
4855 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4856 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4857 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4858 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4859 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4860 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4861 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4862 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4863 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4864 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4865 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4866 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4867 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4868 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4869 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4870 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4871 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4872 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4873 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4874 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4875 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4876 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4877 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4878 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4879 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4880 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4881 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4882 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4883 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4884 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4885 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4886 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4887 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4888 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4889 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4890 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4891 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4892 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4893 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4894 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4895 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4896 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4897 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4898 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4899 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4900 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4901 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4902 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4903 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4904 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4905 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4906 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4907 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4908 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4909 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4910 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4911 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4912 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4913 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4914 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4915 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4916 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4917 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4918 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4919 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4920 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4921 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4922 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4923 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4924 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4925 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4926 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4927 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4928 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4929 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4930 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4931 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4932 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4933 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4934 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4935 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4936 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4937 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4938 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4939 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4940 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4941 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4942 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4943 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4944 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4945 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4946 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4947 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4948 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4949 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4950 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4951 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4952 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4953 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4954 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4955 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4956 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4957 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4958 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4959 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4960 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4961 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4962 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4963 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4964 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4965 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4966 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4967 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4968 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4969 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4970 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4972 Info.memVT = MVT::v4i32;
4973 Info.ptrVal = nullptr;
4976 Info.align = Align(16);
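// Surface loads (suld) are keyed by element width (i8/i16/i32/i64); as
// with textures there is no pointer operand, hence the null ptrVal.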
4980 case Intrinsic::nvvm_suld_1d_i8_clamp:
4981 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4982 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4983 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4984 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4985 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4986 case Intrinsic::nvvm_suld_2d_i8_clamp:
4987 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4988 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4989 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4990 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
4991 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
4992 case Intrinsic::nvvm_suld_3d_i8_clamp:
4993 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
4994 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
4995 case Intrinsic::nvvm_suld_1d_i8_trap:
4996 case Intrinsic::nvvm_suld_1d_v2i8_trap:
4997 case Intrinsic::nvvm_suld_1d_v4i8_trap:
4998 case Intrinsic::nvvm_suld_1d_array_i8_trap:
4999 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
5000 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
5001 case Intrinsic::nvvm_suld_2d_i8_trap:
5002 case Intrinsic::nvvm_suld_2d_v2i8_trap:
5003 case Intrinsic::nvvm_suld_2d_v4i8_trap:
5004 case Intrinsic::nvvm_suld_2d_array_i8_trap:
5005 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
5006 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
5007 case Intrinsic::nvvm_suld_3d_i8_trap:
5008 case Intrinsic::nvvm_suld_3d_v2i8_trap:
5009 case Intrinsic::nvvm_suld_3d_v4i8_trap:
5010 case Intrinsic::nvvm_suld_1d_i8_zero:
5011 case Intrinsic::nvvm_suld_1d_v2i8_zero:
5012 case Intrinsic::nvvm_suld_1d_v4i8_zero:
5013 case Intrinsic::nvvm_suld_1d_array_i8_zero:
5014 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
5015 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
5016 case Intrinsic::nvvm_suld_2d_i8_zero:
5017 case Intrinsic::nvvm_suld_2d_v2i8_zero:
5018 case Intrinsic::nvvm_suld_2d_v4i8_zero:
5019 case Intrinsic::nvvm_suld_2d_array_i8_zero:
5020 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
5021 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
5022 case Intrinsic::nvvm_suld_3d_i8_zero:
5023 case Intrinsic::nvvm_suld_3d_v2i8_zero:
5024 case Intrinsic::nvvm_suld_3d_v4i8_zero:
5026 Info.memVT = MVT::i8;
5027 Info.ptrVal = nullptr;
5030 Info.align = Align(16);
5034 case Intrinsic::nvvm_suld_1d_i16_clamp:
5035 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
5036 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
5037 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
5038 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
5039 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
5040 case Intrinsic::nvvm_suld_2d_i16_clamp:
5041 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
5042 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
5043 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
5044 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
5045 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
5046 case Intrinsic::nvvm_suld_3d_i16_clamp:
5047 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
5048 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
5049 case Intrinsic::nvvm_suld_1d_i16_trap:
5050 case Intrinsic::nvvm_suld_1d_v2i16_trap:
5051 case Intrinsic::nvvm_suld_1d_v4i16_trap:
5052 case Intrinsic::nvvm_suld_1d_array_i16_trap:
5053 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
5054 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
5055 case Intrinsic::nvvm_suld_2d_i16_trap:
5056 case Intrinsic::nvvm_suld_2d_v2i16_trap:
5057 case Intrinsic::nvvm_suld_2d_v4i16_trap:
5058 case Intrinsic::nvvm_suld_2d_array_i16_trap:
5059 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
5060 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
5061 case Intrinsic::nvvm_suld_3d_i16_trap:
5062 case Intrinsic::nvvm_suld_3d_v2i16_trap:
5063 case Intrinsic::nvvm_suld_3d_v4i16_trap:
5064 case Intrinsic::nvvm_suld_1d_i16_zero:
5065 case Intrinsic::nvvm_suld_1d_v2i16_zero:
5066 case Intrinsic::nvvm_suld_1d_v4i16_zero:
5067 case Intrinsic::nvvm_suld_1d_array_i16_zero:
5068 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
5069 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
5070 case Intrinsic::nvvm_suld_2d_i16_zero:
5071 case Intrinsic::nvvm_suld_2d_v2i16_zero:
5072 case Intrinsic::nvvm_suld_2d_v4i16_zero:
5073 case Intrinsic::nvvm_suld_2d_array_i16_zero:
5074 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
5075 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
5076 case Intrinsic::nvvm_suld_3d_i16_zero:
5077 case Intrinsic::nvvm_suld_3d_v2i16_zero:
5078 case Intrinsic::nvvm_suld_3d_v4i16_zero:
5080 Info.memVT = MVT::i16;
5081 Info.ptrVal = nullptr;
5084 Info.align = Align(16);
5088 case Intrinsic::nvvm_suld_1d_i32_clamp:
5089 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
5090 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
5091 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
5092 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
5093 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
5094 case Intrinsic::nvvm_suld_2d_i32_clamp:
5095 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
5096 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
5097 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
5098 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
5099 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
5100 case Intrinsic::nvvm_suld_3d_i32_clamp:
5101 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
5102 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
5103 case Intrinsic::nvvm_suld_1d_i32_trap:
5104 case Intrinsic::nvvm_suld_1d_v2i32_trap:
5105 case Intrinsic::nvvm_suld_1d_v4i32_trap:
5106 case Intrinsic::nvvm_suld_1d_array_i32_trap:
5107 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
5108 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
5109 case Intrinsic::nvvm_suld_2d_i32_trap:
5110 case Intrinsic::nvvm_suld_2d_v2i32_trap:
5111 case Intrinsic::nvvm_suld_2d_v4i32_trap:
5112 case Intrinsic::nvvm_suld_2d_array_i32_trap:
5113 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
5114 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
5115 case Intrinsic::nvvm_suld_3d_i32_trap:
5116 case Intrinsic::nvvm_suld_3d_v2i32_trap:
5117 case Intrinsic::nvvm_suld_3d_v4i32_trap:
5118 case Intrinsic::nvvm_suld_1d_i32_zero:
5119 case Intrinsic::nvvm_suld_1d_v2i32_zero:
5120 case Intrinsic::nvvm_suld_1d_v4i32_zero:
5121 case Intrinsic::nvvm_suld_1d_array_i32_zero:
5122 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
5123 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
5124 case Intrinsic::nvvm_suld_2d_i32_zero:
5125 case Intrinsic::nvvm_suld_2d_v2i32_zero:
5126 case Intrinsic::nvvm_suld_2d_v4i32_zero:
5127 case Intrinsic::nvvm_suld_2d_array_i32_zero:
5128 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
5129 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
5130 case Intrinsic::nvvm_suld_3d_i32_zero:
5131 case Intrinsic::nvvm_suld_3d_v2i32_zero:
5132 case Intrinsic::nvvm_suld_3d_v4i32_zero:
5134 Info.memVT = MVT::i32;
5135 Info.ptrVal = nullptr;
5138 Info.align = Align(16);
5142 case Intrinsic::nvvm_suld_1d_i64_clamp:
5143 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
5144 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
5145 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
5146 case Intrinsic::nvvm_suld_2d_i64_clamp:
5147 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
5148 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
5149 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
5150 case Intrinsic::nvvm_suld_3d_i64_clamp:
5151 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
5152 case Intrinsic::nvvm_suld_1d_i64_trap:
5153 case Intrinsic::nvvm_suld_1d_v2i64_trap:
5154 case Intrinsic::nvvm_suld_1d_array_i64_trap:
5155 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
5156 case Intrinsic::nvvm_suld_2d_i64_trap:
5157 case Intrinsic::nvvm_suld_2d_v2i64_trap:
5158 case Intrinsic::nvvm_suld_2d_array_i64_trap:
5159 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
5160 case Intrinsic::nvvm_suld_3d_i64_trap:
5161 case Intrinsic::nvvm_suld_3d_v2i64_trap:
5162 case Intrinsic::nvvm_suld_1d_i64_zero:
5163 case Intrinsic::nvvm_suld_1d_v2i64_zero:
5164 case Intrinsic::nvvm_suld_1d_array_i64_zero:
5165 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
5166 case Intrinsic::nvvm_suld_2d_i64_zero:
5167 case Intrinsic::nvvm_suld_2d_v2i64_zero:
5168 case Intrinsic::nvvm_suld_2d_array_i64_zero:
5169 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
5170 case Intrinsic::nvvm_suld_3d_i64_zero:
5171 case Intrinsic::nvvm_suld_3d_v2i64_zero:
5173 Info.memVT = MVT::i64;
5174 Info.ptrVal = nullptr;
5177 Info.align = Align(16);
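// tcgen05.ld moves N 32-bit words of Tensor Memory into registers; the
// case groups below scale memVT with the xN multiplier of the shape, from
// a single word (v1i32) up to v128i32, with matching f32 vector types for
// the .red variants.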
5181 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
5182 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
5183 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
5185 Info.memVT = MVT::v1i32;
5186 Info.ptrVal = I.getArgOperand(0);
5194 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
5195 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
5196 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
5197 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
5198 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_i32:
5199 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_i32: {
5201 Info.memVT = MVT::v2i32;
5202 Info.ptrVal = I.getArgOperand(0);
5210 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_f32:
5211 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_f32: {
5213 Info.memVT = MVT::v2f32;
5214 Info.ptrVal = I.getArgOperand(0);
5222 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
5223 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
5224 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
5225 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
5226 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
5227 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_i32:
5228 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_i32: {
5230 Info.memVT = MVT::v4i32;
5231 Info.ptrVal = I.getArgOperand(0);
5239 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_f32:
5240 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_f32: {
5242 Info.memVT = MVT::v4f32;
5243 Info.ptrVal = I.getArgOperand(0);
5251 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
5252 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
5253 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
5254 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
5255 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
5256 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_i32:
5257 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_i32: {
5259 Info.memVT = MVT::v8i32;
5260 Info.ptrVal = I.getArgOperand(0);
5268 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_f32:
5269 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_f32: {
5271 Info.memVT = MVT::v8f32;
5272 Info.ptrVal = I.getArgOperand(0);
5280 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
5281 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
5282 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
5283 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
5284 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
5285 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_i32:
5286 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_i32: {
5288 Info.memVT = MVT::v16i32;
5289 Info.ptrVal = I.getArgOperand(0);
5297 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_f32:
5298 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_f32: {
5300 Info.memVT = MVT::v16f32;
5301 Info.ptrVal = I.getArgOperand(0);
5309 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
5310 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
5311 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
5312 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
5313 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
5314 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_i32:
5315 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_i32: {
5317 Info.memVT = MVT::v32i32;
5318 Info.ptrVal = I.getArgOperand(0);
5326 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_f32:
5327 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_f32: {
5329 Info.memVT = MVT::v32f32;
5330 Info.ptrVal = I.getArgOperand(0);
5338 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
5339 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
5340 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
5341 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
5342 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
5343 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_i32:
5344 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_i32: {
5346 Info.memVT = MVT::v64i32;
5347 Info.ptrVal = I.getArgOperand(0);
5355 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_f32:
5356 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_f32: {
5358 Info.memVT = MVT::v64f32;
5359 Info.ptrVal = I.getArgOperand(0);
5367 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
5368 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
5369 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
5370 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
5371 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
5372 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_i32:
5373 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_i32: {
5375 Info.memVT = MVT::v128i32;
5376 Info.ptrVal = I.getArgOperand(0);
5384 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_f32:
5385 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_f32: {
5387 Info.memVT = MVT::v128f32;
5388 Info.ptrVal = I.getArgOperand(0);
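// tcgen05.st mirrors the ld cases above, writing from registers back to
// Tensor Memory with the same xN-scaled vector widths.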
5396 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
5397 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
5398 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
5400 Info.memVT = MVT::i32;
5401 Info.ptrVal = I.getArgOperand(0);
5409 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
5410 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
5411 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
5412 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
5414 Info.memVT = MVT::v2i32;
5415 Info.ptrVal = I.getArgOperand(0);
5423 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
5424 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
5425 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
5426 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
5427 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
5429 Info.memVT = MVT::v4i32;
5430 Info.ptrVal = I.getArgOperand(0);
5438 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
5439 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
5440 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
5441 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
5442 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
5444 Info.memVT = MVT::v8i32;
5445 Info.ptrVal = I.getArgOperand(0);
5453 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
5454 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
5455 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
5456 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
5457 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
5459 Info.memVT = MVT::v16i32;
5460 Info.ptrVal = I.getArgOperand(0);
5468 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
5469 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
5470 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
5471 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
5472 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
5474 Info.memVT = MVT::v32i32;
5475 Info.ptrVal = I.getArgOperand(0);
5483 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
5484 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
5485 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
5486 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
5487 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
5489 Info.memVT = MVT::v64i32;
5490 Info.ptrVal = I.getArgOperand(0);
5498 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
5499 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
5500 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
5501 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
5502 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
5504 Info.memVT = MVT::v128i32;
5505 Info.ptrVal = I.getArgOperand(0);
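// tcgen05.mma with disable_output_lane: the cg1 forms are modeled with a
// v4i32 access, the cg2 forms below with v8i32.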
5512 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
5513 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
5514 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
5515 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
5516 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
5517 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
5518 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
5520 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
5521 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
5522 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
5523 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
5525 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {
5528 Info.memVT = MVT::v4i32;
5529 Info.ptrVal = I.getArgOperand(0);
5532 Info.align = Align(16);
5537 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
5538 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
5539 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
5540 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
5541 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
5542 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
5543 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
5544 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
5545 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
5547 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
5548 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
5550 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {
5553 Info.memVT = MVT::v8i32;
5554 Info.ptrVal = I.getArgOperand(0);
5557 Info.align = Align(16);
5569 std::string ParamName;
5574 ParamStr << "_vararg";
5576 ParamStr << "_param_" << Idx;
5628 if (Constraint.size() == 1) {
5629 switch (Constraint[0]) {
5648 std::pair<unsigned, const TargetRegisterClass *>
5652 if (Constraint.size() == 1) {
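// Single-letter inline-asm constraints map straight onto NVPTX register
// classes (b1/b16/b32/b64, plus b128 on sm_70 and newer).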
5653 switch (Constraint[0]) {
5655 return std::make_pair(0U, &NVPTX::B1RegClass);
5658 return std::make_pair(0U, &NVPTX::B16RegClass);
5661 return std::make_pair(0U, &NVPTX::B32RegClass);
5665 return std::make_pair(0U, &NVPTX::B64RegClass);
5667 if (STI.getSmVersion() < 70)
5669 "supported for sm_70 and higher!");
5670 return std::make_pair(0U, &NVPTX::B128RegClass);
5700 return Const && Const->getZExtValue() == 0;
5732 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
5740 ((ZeroOpNum == 1) ? N1 : MAD),
5741 ((ZeroOpNum == 1) ? MAD : N1));
5756 (N->getFlags().hasAllowContract() &&
5769 int nonAddCount = 0;
5778 int orderNo = N->getIROrder();
5784 if (orderNo - orderNo2 < 500)
5790 bool opIsLive = false;
5799 int orderNo3 = User->getIROrder();
5800 if (orderNo3 > orderNo) {
5808 int orderNo3 = User->getIROrder();
5809 if (orderNo3 > orderNo) {
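// combineUnpackingMovIntoLoad: if every user just unpacks the v2f32/v2i32
// results, reissue the load with twice as many scalar outputs and rebuild
// the vectors, so the unpacking movs fold away.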
5844 EVT ElementVT = N->getValueType(0);
5853 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5855 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5856 if (N->getOpcode() != ISD::LOAD)
5873 return !U.getUser()->use_empty();
5887 unsigned OldNumOutputs;
5888 switch (LD->getOpcode()) {
5897 Operands.push_back(DCI.DAG.getConstant(UINT32_MAX, DL, MVT::i32));
5898 Operands.push_back(DCI.DAG.getIntPtrConstant(
5908 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5919 const unsigned NewNumOutputs = OldNumOutputs * 2;
5922 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5925 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5926 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5927 LD->getMemOperand());
5933 for (unsigned I : seq(OldNumOutputs))
5934 Results.push_back(DCI.DAG.getBuildVector(
5935 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5940 return DCI.DAG.getMergeValues(Results, DL);
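// combinePackingMovIntoStore is the store-side dual: BUILD_VECTOR operands
// feeding the store are flattened into the store's operand list instead of
// being packed into a register first.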
5955 unsigned Front, unsigned Back) {
5962 EVT ElementVT = N->getOperand(Front).getValueType();
5972 switch (N->getOpcode()) {
5985 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5999 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
6005 if (!BV.hasOneUse())
6013 Op = Op.getOperand(0);
6017 Op->getOperand(0).getValueType() == MVT::i32)
6024 Operands.append({BV.getOperand(0), BV.getOperand(1)});
6026 Operands.append(N->op_end() - Back, N->op_end());
6030 ST->getMemoryVT(), ST->getMemOperand());
6041 if (!ST->getValue().getValueType().isSimple())
6054 if (!N->getValueType(0).isSimple())
6074 if (VT.isVector() || VT != MVT::i32)
6099 if (!IsExt0 && !IsExt1)
6104 if (IsExt0 != IsExt1)
6125 if ((Idx0 && !Idx1) || (!Idx0 && Idx1))
6129 return std::abs(Idx0->getSExtValue() - Idx1->getSExtValue()) != 1;
6157 EVT VT = N->getValueType(0);
6158 if (VT != MVT::v2f32)
6169 unsigned Opc = N->getOpcode();
6176 return Op.getOperand(Index);
6206 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
6219 switch (MinMax2Opcode) {
6222 return NVPTXISD::FMAXNUM3;
6225 return NVPTXISD::FMINNUM3;
6227 return NVPTXISD::FMAXIMUM3;
6229 return NVPTXISD::FMINIMUM3;
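// PerformFMinMaxCombine folds (fmax (fmax a, b), c) and friends into the
// 3-input min/max nodes above, which are only available for f32 on
// PTX 8.8+ / sm_100+.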
6239 unsigned PTXVersion, unsigned SmVersion) {
6242 EVT VT = N->getValueType(0);
6243 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)
6248 unsigned MinMaxOp2 = N->getOpcode();
6278 EVT VT = N->getValueType(0);
6282 const SDValue &Num = N->getOperand(0);
6283 const SDValue &Den = N->getOperand(1);
6286 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
6305 if (!Op.hasOneUse())
6307 EVT ToVT = N->getValueType(0);
6308 EVT FromVT = Op.getValueType();
6309 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
6310 (ToVT == MVT::i64 && FromVT == MVT::i32)))
6317 unsigned ExtOpcode = N->getOpcode();
6318 unsigned Opcode = 0;
6320 Opcode = NVPTXISD::MUL_WIDE_SIGNED;
6322 Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;
6327 const auto ShiftAmt = Op.getConstantOperandVal(1);
6350 EVT OrigVT = Op.getOperand(0).getValueType();
6356 EVT OrigVT = Op.getOperand(0).getValueType();
6383 IsSigned = (LHSSign == Signed);
6387 const APInt &Val = CI->getAPIntValue();
6389 return Val.isIntN(OptSize);
6398 return LHSSign == RHSSign;
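// TryMULWIDECombine: when both multiply operands are extensions from the
// half-width type (or constants that fit in that many bits), the multiply
// is demoted and replaced with mul.wide, which produces the full-width
// result directly.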
6408 EVT MulType = N->getValueType(0);
6409 if (MulType != MVT::i32 && MulType != MVT::i64) {
6449 if (MulType == MVT::i32) {
6450 DemotedVT = MVT::i16;
6452 DemotedVT = MVT::i32;
6464 Opc = NVPTXISD::MUL_WIDE_SIGNED;
6466 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
6474 return Const && Const->getZExtValue() == 1;
6482 return Add->getOperand(1);
6485 return Add->getOperand(0);
6526 (ConstOpNo == 1) ? X : NewMul,
6527 (ConstOpNo == 1) ? NewMul : X);
6538 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
6588 unsigned int SmVersion) {
6589 EVT CCType = N->getValueType(0);
6593 EVT AType = A.getValueType();
6594 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
6597 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
6608 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
6636 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
6641 if (!Index || Index->getZExtValue() == 0)
6656 if (EltVT != EltIVT)
6659 if (EltVT != N->getValueType(0))
6686 unsigned BitWidth = N->getValueType(0).getSizeInBits();
6701 m_Zero(), LogicalShift));
6708 LogicalShift, m_Zero()));
6710 if (!MatchedUGT && !MatchedULT)
6715 : NVPTXISD::SHL_CLAMP;
6724 if (VectorVT != MVT::v4i8)
6735 for (int I = 0; I < 4; ++I) {
6754 auto VT = N->getValueType(0);
6761 auto Op0 = N->getOperand(0);
6762 auto Op1 = N->getOperand(1);
6769 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
6775 for (auto &[Op, OpBytes] : OpData) {
6778 *Op = Op->getOperand(0);
6781 Op->getOperand(0).getValueType() == MVT::i32))
6786 if (!Op->hasOneUse())
6789 *Op = Op->getOperand(0);
6797 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
6798 "PRMT selector values out of range");
6800 *Op = Op->getOperand(0);
6806 auto &DAG = DCI.DAG;
6810 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
6819 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
6822 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
6823 return ASCN2->getOperand(0);
6841 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2, unsigned S3) {
6843 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
6848 return GetSelector(V, V + 1, V + 2, V + 3);
6850 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
6852 return GetSelector(V, V, V, V);
6854 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
6856 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
6858 unsigned V1 = (V & 1) << 1;
6859 return GetSelector(V1, V1 + 1, V1, V1 + 1);
6867 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
6868 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6872 APInt Result(32, 0);
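// Each 4-bit selector nibble picks one of the eight bytes of {B:A}; when
// the nibble's sign bit is set, the byte is replaced by its sign
// replication (the ashr below).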
6877 APInt Byte = BitField.extractBits(8, Idx * 8);
6879 Byte = Byte.ashr(8);
6880 Result.insertBits(Byte, I * 8);
6895 N->getConstantOperandAPInt(1),
6896 N->getConstantOperandAPInt(2),
6897 N->getConstantOperandVal(3)),
6898 SDLoc(N), N->getValueType(0));
6913 switch (R.getOpcode()) {
6937 return DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(R), R.getValueType(),
6945 for (auto &Op : R->ops()) {
6959 R.getValueType(), V, R.getOperand(1));
6968 switch (AddIntrinsicID) {
6971 case Intrinsic::nvvm_add_rn_sat_f16:
6972 case Intrinsic::nvvm_add_rn_sat_v2f16:
6973 return NVPTXISD::SUB_RN_SAT;
6974 case Intrinsic::nvvm_add_rn_ftz_sat_f16:
6975 case Intrinsic::nvvm_add_rn_ftz_sat_v2f16:
6976 return NVPTXISD::SUB_RN_FTZ_SAT;
7006 unsigned IID = N->getConstantOperandVal(0);
7011 case Intrinsic::nvvm_add_rn_sat_f16:
7012 case Intrinsic::nvvm_add_rn_ftz_sat_f16:
7013 case Intrinsic::nvvm_add_rn_sat_v2f16:
7014 case Intrinsic::nvvm_add_rn_ftz_sat_v2f16:
7037 DAGCombinerInfo &DCI) const {
7039 switch (N->getOpcode()) {
7066 STI.getSmVersion());
7073 case NVPTXISD::PRMT:
7075 case NVPTXISD::ProxyReg:
7103 EVT ToVT = Op->getValueType(0);
7104 if (ToVT != MVT::v2i8) {
7131 case Intrinsic::nvvm_ldu_global_i:
7132 case Intrinsic::nvvm_ldu_global_f:
7133 case Intrinsic::nvvm_ldu_global_p: {
7134 EVT ResVT = N->getValueType(0);
7146 bool NeedTrunc = false;
7152 unsigned Opcode = 0;
7160 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
7164 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
7177 OtherOps.append(N->op_begin() + 2, N->op_end());
7187 for (unsigned i = 0; i < NumElts; ++i) {
7205 "Custom handling of non-i8 ldu/ldg?");
7228 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
7229 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
7230 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
7231 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
7232 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
7233 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
7234 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
7235 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
7236 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
7237 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
7238 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
7239 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
7240 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
7241 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
7242 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
7243 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
7244 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
7245 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
7246 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
7247 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
7248 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
7249 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
7250 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
7251 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
7253 Results.push_back(Res->first);
7254 Results.push_back(Res->second);
7258 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
7259 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
7260 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
7261 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
7262 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
7263 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
7265 Results.push_back(Res->first);
7266 Results.push_back(Res->second);
7270 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_i32:
7271 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_f32:
7272 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_i32:
7273 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_f32:
7274 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_i32:
7275 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_f32:
7276 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_i32:
7277 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_f32:
7278 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_i32:
7279 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_f32:
7280 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_i32:
7281 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_f32:
7282 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_i32:
7283 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_f32:
7284 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_i32:
7285 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_f32:
7286 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_i32:
7287 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_f32:
7288 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_i32:
7289 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_f32:
7290 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_i32:
7291 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_f32:
7292 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_i32:
7293 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_f32:
7295 Results.push_back(std::get<0>(*Res));
7296 Results.push_back(std::get<1>(*Res));
7297 Results.push_back(std::get<2>(*Res));
7312 assert(Reg.getValueType() == MVT::i128 &&
7313 "Custom lowering for CopyFromReg with 128-bit reg only");
7315 N->getValueType(2)};
7337 DAG.getNode(NVPTXISD::ProxyReg, SDLoc(N), VT, {Chain, NewReg});
7346 assert(N->getValueType(0) == MVT::i128 &&
7347 "Custom lowering for atomic128 only supports i128");
7355 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
7356 "requires target sm_90.",
7367 for (const auto &Op : AN->ops().drop_front(2)) {
7382 {Result.getValue(0), Result.getValue(1)}));
7383 Results.push_back(Result.getValue(2));
7386 void NVPTXTargetLowering::ReplaceNodeResults(
7388 switch (N->getOpcode()) {
7404 case NVPTXISD::ProxyReg:
7420 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
7421 STI.getPTXVersion() >= 63)
7423 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&
7424 STI.getPTXVersion() >= 78)
7426 if (Ty->isFloatTy())
7428 if (Ty->isDoubleTy() && STI.hasAtomAddF64())
7434 assert(Ty->isIntegerTy() && "Ty should be integer at this point");
7454 if (STI.hasAtomBitwise64())
7475 if (STI.hasAtomMinMax64())
7520 ->getBitWidth() < STI.getMinCmpXchgSizeInBits()) ||
7551 STI.getMinCmpXchgSizeInBits())
7574 assert(SSID.has_value() && "Expected an atomic operation");
7598 assert(SSID.has_value() && "Expected an atomic operation");
7602 ->getBitWidth() < STI.getMinCmpXchgSizeInBits()
7628 case ISD::VP_FP_TO_UINT:
7630 return ISD::VP_FP_TO_SINT;
7651 unsigned Mode = Op.getConstantOperandVal(3);
7661 "PRMT must have i32 operands");
7670 KnownBits Byte = BitField.extractBits(8, Idx * 8);
7681 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
7686 auto DestVT = LD->getValueType(0);
7687 if (DestVT.isVector())
7700 switch (Op.getOpcode()) {
7701 case NVPTXISD::PRMT:
7727 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
7728 unsigned ByteStart = (Idx % 4) * 8;
7730 Src.setBit(ByteStart + 7);
7732 Src.setBits(ByteStart, ByteStart + 8);
7735 return {DemandedLHS, DemandedRHS};
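// If the demanded bytes only use the identity selection (0x3210 from A or
// 0x7654 from B), the PRMT contributes nothing and can be bypassed in
// favor of that operand.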
7765 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
7766 const unsigned SelBits = (4 - LeadingBytes) * 4;
7767 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))
7769 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))
7782 if ((DemandedOp0 && DemandedOp0 != Op0) ||
7783 (DemandedOp1 && DemandedOp1 != Op1)) {
7784 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
7785 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
7797 switch (Op.getOpcode()) {
7798 case NVPTXISD::PRMT:
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains the declarations of entities that describe floating point environment and related ...
static bool IsIndirectCall(const MachineInstr *MI)
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
Register const TargetRegisterInfo * TRI
NVPTX address space definition.
static SDValue reportInvalidTensormapReplaceUsage(SDValue Op, SelectionDAG &DAG, unsigned Val)
static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG, bool hasOffset=false)
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static SDValue PerformSELECTShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Transform patterns like: (select (ugt shift_amt, BitWidth-1), 0, (srl/shl x, shift_amt)) (select (ult...
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
static unsigned getMinMax3Opcode(unsigned MinMax2Opcode)
Get 3-input version of a 2-input min/max opcode.
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
#define TCGEN05_LD_RED_INST(SHAPE, NUM, TYPE)
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static unsigned getTcgen05LdRedID(Intrinsic::ID IID)
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
static Align getArgumentAlignment(const CallBase *CB, Type *Ty, unsigned Idx, const DataLayout &DL)
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)
PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static std::optional< unsigned > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)
static bool isNonCoalescableBuildVector(const SDValue &BV)
Check if a v2f32 BUILD_VECTOR provably packs values from non-adjacent register pairs (non-coalescable...
static SDValue PerformScalarizeV2F32Op(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Scalarize a v2f32 arithmetic node (FADD, FMUL, FSUB, FMA) when at least one operand is a BUILD_VECTOR...
static bool isConstZero(const SDValue &Operand)
static unsigned getF16SubOpc(Intrinsic::ID AddIntrinsicID)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static unsigned getTcgen05MMADisableOutputLane(unsigned IID)
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue lowerBSWAP(SDValue Op, SelectionDAG &DAG)
static SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
#define TCGEN05_LD_RED_INTR(SHAPE, NUM, TYPE)
static SDValue lowerTensormapReplaceElemtype(SDValue Op, SelectionDAG &DAG)
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)
static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static std::pair< MemSDNode *, uint32_t > convertMLOADToLoadWithUsedBytesMask(MemSDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
static SDValue matchMADConstOnePattern(SDValue Add)
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
static std::optional< std::tuple< SDValue, SDValue, SDValue > > lowerTcgen05LdRed(SDNode *N, SelectionDAG &DAG)
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
static SDValue lowerTensormapReplaceSwizzleMode(SDValue Op, SelectionDAG &DAG)
static SDValue combineIntrinsicWOChain(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
static SDValue combineF16AddWithNeg(SDNode *N, SelectionDAG &DAG, Intrinsic::ID AddIntrinsicID)
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2.approx for log2.
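Both approximation knobs are plain llc flags; an illustrative invocation (triple and CPU chosen arbitrarily here):

    llc -mtriple=nvptx64-nvidia-cuda -mcpu=sm_80 \
        -nvptx-approx-log2f32 -nvptx-prec-sqrtf32=false in.ll -o out.ptx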
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
Contains matchers for matching SelectionDAG nodes and values.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static const fltSemantics & IEEEsingle()
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits low bits from this APInt.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits high bits from this APInt.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the bit at the position given by "BitPosition" to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bit signed integer value.
bool slt(const APInt &RHS) const
Signed less than comparison.
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
bool isIntN(unsigned N) const
Check if this APInt has an N-bit unsigned integer value.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
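A short illustration of the APInt bit-manipulation helpers listed above (widths are part of the value):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    APInt V(32, 0xDEADBEEF);
    APInt Lo = V.getLoBits(16);       // 0x0000BEEF, still 32 bits wide
    APInt Hi = V.getHiBits(16);       // 0x0000DEAD (high bits shifted down)
    APInt Byte = V.extractBits(8, 8); // 8-bit APInt holding 0xBE
    APInt T = V.trunc(16);            // 16-bit APInt holding 0xBEEF
    uint64_t Z = Byte.getZExtValue(); // 0xBE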
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
An instruction that atomically reads a memory location, combines it with another value, and then stores the result back. Returns the old value.
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
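Spelled out, the wrapping variants behave as follows (pseudo-C over an unsigned location p and operand v, per the LangRef semantics; comparisons are unsigned):

    // uinc_wrap: increment, wrapping back to 0 once the maximum v is reached.
    *p = (old >= v) ? 0 : old + 1;
    // udec_wrap: decrement, reloading v when old is zero or out of range.
    *p = (old == 0 || old > v) ? v : old - 1;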
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to calling a function.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signature does not match the call signature.
FunctionType * getFunctionType() const
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
uint64_t getNumOperands() const
A parsed version of the target data layout string, and methods for querying it.
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for an unsupported feature in the backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Module * getParent()
Get the module that this global value is contained inside of.
Common base class shared among various IRBuilders.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
static constexpr unsigned NoRegister
Instances of this class represent a uniqued identifier for a section in the current translation unit.
StringRef getName() const
getName - Get the symbol name.
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
bool hasTensormapReplaceSwizzleModeSupport(unsigned value) const
bool hasUsedBytesMaskPragma() const
bool hasTensormapReplaceElemtypeSupport(unsigned value) const
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
const NVPTXTargetMachine * nvTM
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
bool useF32FTZ(const MachineFunction &MF) const
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (touches memory).
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32(const SDNode *N=nullptr) const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in the KnownZero/KnownOne bitsets.
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
~NVPTXTargetObjectFile() override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalSymbol.
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands, and they produce a value as well as a token chain.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SDValue.
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check for vector.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an SDValue.
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncating it.
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
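The builder methods above compose in the usual way; a tiny illustrative fragment (assuming a lowering/combine context with DAG and a node N in scope):

    SDLoc DL(N);
    SDValue X = N->getOperand(0);
    // (X << 1) | 1, built from automatically uniqued nodes.
    SDValue One = DAG.getConstant(1, DL, MVT::i32);
    SDValue Shl = DAG.getNode(ISD::SHL, DL, MVT::i32, X, One);
    SDValue Or  = DAG.getNode(ISD::OR,  DL, MVT::i32, Shl, One);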
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by truncation).
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to make them valid.
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrNegativeOneBooleanContent
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to do about it.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequences that increase the amount of flow control.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contribute to the DemandedBits/DemandedElts of the final output.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
TargetLowering(const TargetLowering &)=delete
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetFrameLowering * getFrameLowering() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
Type * getType() const
All values are typed, get the type of this value.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ POISON
POISON - A poison node.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand that prevents memory accesses to the masked-off lanes.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value.
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm.maximum semantics.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defined outside of the scope of this SelectionDAG.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high half of the result.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and the value to copy into it.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on comparing ops #0 and #1 with the condition code in op #4.
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definitions.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF), and otherwise returns VAL.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt).
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum, the same as FMINNUM_IEEE and FMAXNUM_IEEE except when either operand is an sNaN.
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDEF.
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ ADDRESS_SPACE_SHARED_CLUSTER
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
bool isPackedVectorTy(EVT VT)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual underlying non-aggregate types that comprise it.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), where A is the 0-based index of the item and B, C, ... are the corresponding values from the input ranges.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
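For example, PowerOf2Ceil(40) == 64 and PowerOf2Ceil(64) == 64; the argument-promotion and vectorization helpers in this file rely on this to round sizes up to register widths.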
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
auto reverse(ContainerTy &&C)
std::optional< SyncScope::ID > getAtomicSyncScopeID(const Instruction *I)
A helper function that returns an atomic operation's sync scope; returns std::nullopt if it is not an atomic operation.
unsigned promoteScalarArgumentSize(unsigned size)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool shouldPassAsArray(Type *Ty)
CodeGenOptLevel
Code generation optimization level.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
AtomicOrdering
Atomic ordering for LLVM's memory model.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL)
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isParamGridConstant(const Argument &Arg)
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
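Example (llvm/ADT/Sequence.h; 'handle' is a stand-in for real per-index work):

    #include "llvm/ADT/Sequence.h"
    // Visits 0, 1, 2, 3; the End bound is exclusive.
    for (unsigned I : llvm::seq(0u, 4u))
      handle(I);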
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL)
Since function arguments are passed via .param space, we may want to increase their alignment in a way that ensures that we can effectively vectorize their loads & stores.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
bool is32BitVector() const
Return true if this is a 32-bit vector type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type, which is replaced by EltVT.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
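A small sketch of how these pieces combine, in the spirit of the known-bits code above that glues element halves together (Zero and One are the public APInt members of KnownBits):

    #include "llvm/Support/KnownBits.h"
    using llvm::KnownBits;

    KnownBits Half(16);
    Half.One.setBit(0);                 // bit 0 known to be one
    Half.Zero.setHighBits(8);           // top 8 bits of the half known zero
    KnownBits Full = Half.concat(Half); // 32 bits, argument on the bottom
    KnownBits Fresh(32);
    Fresh.insertBits(Half, 16);         // place the 16-bit pattern at bit 16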
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
bool isAfterLegalizeDAG() const
bool isBeforeLegalize() const
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients that want to combine.
bool CombineTo(SDValue O, SDValue N)