41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
44#define GET_INSTRINFO_CTOR_DTOR
45#include "RISCVGenInstrInfo.inc"
47#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
71#define GET_RISCVVPseudosTable_IMPL
72#include "RISCVGenSearchableTables.inc"
78#define GET_RISCVMaskedPseudosTable_IMPL
79#include "RISCVGenSearchableTables.inc"
85 RISCV::ADJCALLSTACKUP),
88#define GET_INSTRINFO_HELPERS
89#include "RISCVGenInstrInfo.inc"
92 if (
STI.hasStdExtZca())
101 int &FrameIndex)
const {
111 case RISCV::VL1RE8_V:
112 case RISCV::VL1RE16_V:
113 case RISCV::VL1RE32_V:
114 case RISCV::VL1RE64_V:
117 case RISCV::VL2RE8_V:
118 case RISCV::VL2RE16_V:
119 case RISCV::VL2RE32_V:
120 case RISCV::VL2RE64_V:
123 case RISCV::VL4RE8_V:
124 case RISCV::VL4RE16_V:
125 case RISCV::VL4RE32_V:
126 case RISCV::VL4RE64_V:
129 case RISCV::VL8RE8_V:
130 case RISCV::VL8RE16_V:
131 case RISCV::VL8RE32_V:
132 case RISCV::VL8RE64_V:
140 switch (
MI.getOpcode()) {
164 case RISCV::VL1RE8_V:
165 case RISCV::VL2RE8_V:
166 case RISCV::VL4RE8_V:
167 case RISCV::VL8RE8_V:
168 if (!
MI.getOperand(1).isFI())
170 FrameIndex =
MI.getOperand(1).getIndex();
173 return MI.getOperand(0).getReg();
176 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
177 MI.getOperand(2).getImm() == 0) {
178 FrameIndex =
MI.getOperand(1).getIndex();
179 return MI.getOperand(0).getReg();
186 int &FrameIndex)
const {
194 switch (
MI.getOpcode()) {
219 if (!
MI.getOperand(1).isFI())
221 FrameIndex =
MI.getOperand(1).getIndex();
224 return MI.getOperand(0).getReg();
227 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
228 MI.getOperand(2).getImm() == 0) {
229 FrameIndex =
MI.getOperand(1).getIndex();
230 return MI.getOperand(0).getReg();
240 case RISCV::VFMV_V_F:
243 case RISCV::VFMV_S_F:
245 return MI.getOperand(1).isUndef();
253 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
264 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
265 "Unexpected COPY instruction.");
269 bool FoundDef =
false;
270 bool FirstVSetVLI =
false;
271 unsigned FirstSEW = 0;
274 if (
MBBI->isMetaInstruction())
277 if (RISCVInstrInfo::isVectorConfigInstr(*
MBBI)) {
287 unsigned FirstVType =
MBBI->getOperand(2).getImm();
292 if (FirstLMul != LMul)
297 if (!RISCVInstrInfo::isVLPreservingConfig(*
MBBI))
303 unsigned VType =
MBBI->getOperand(2).getImm();
321 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
323 }
else if (
MBBI->getNumDefs()) {
326 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
332 if (!MO.isReg() || !MO.isDef())
334 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
349 if (MO.getReg() != SrcReg)
390 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
391 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
393 assert(!Fractional &&
"It is impossible be fractional lmul here.");
394 unsigned NumRegs = NF * LMulVal;
400 SrcEncoding += NumRegs - 1;
401 DstEncoding += NumRegs - 1;
407 unsigned,
unsigned> {
415 uint16_t Diff = DstEncoding - SrcEncoding;
416 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
417 DstEncoding % 8 == 7)
419 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
420 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
421 DstEncoding % 4 == 3)
423 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
424 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
425 DstEncoding % 2 == 1)
427 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
430 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
435 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
437 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
438 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
440 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
441 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
443 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
446 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
449 while (
I != NumRegs) {
454 auto [LMulCopied, RegClass,
Opc, VVOpc, VIOpc] =
455 GetCopyInfo(SrcEncoding, DstEncoding);
459 if (LMul == LMulCopied &&
462 if (DefMBBI->getOpcode() == VIOpc)
469 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
471 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
479 MIB = MIB.add(DefMBBI->getOperand(2));
487 MIB.addImm(Log2SEW ? Log2SEW : 3);
499 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
500 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
509 bool RenamableDest,
bool RenamableSrc)
const {
513 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
520 if (RISCV::GPRF16RegClass.
contains(DstReg, SrcReg)) {
526 if (RISCV::GPRF32RegClass.
contains(DstReg, SrcReg)) {
532 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
534 if (
STI.hasStdExtZdinx()) {
543 if (
STI.hasStdExtP()) {
552 MCRegister EvenReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
553 MCRegister OddReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
555 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
557 assert(DstReg != RISCV::X0_Pair &&
"Cannot write to X0_Pair");
561 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
562 .
addReg(EvenReg, KillFlag)
565 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
572 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
573 RISCV::GPRRegClass.
contains(DstReg)) {
575 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
580 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
582 if (
STI.hasStdExtZfh()) {
583 Opc = RISCV::FSGNJ_H;
586 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
587 "Unexpected extensions");
589 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
590 &RISCV::FPR32RegClass);
591 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
592 &RISCV::FPR32RegClass);
593 Opc = RISCV::FSGNJ_S;
597 .
addReg(SrcReg, KillFlag);
601 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
604 .
addReg(SrcReg, KillFlag);
608 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
611 .
addReg(SrcReg, KillFlag);
615 if (RISCV::FPR32RegClass.
contains(DstReg) &&
616 RISCV::GPRRegClass.
contains(SrcReg)) {
618 .
addReg(SrcReg, KillFlag);
622 if (RISCV::GPRRegClass.
contains(DstReg) &&
623 RISCV::FPR32RegClass.
contains(SrcReg)) {
625 .
addReg(SrcReg, KillFlag);
629 if (RISCV::FPR64RegClass.
contains(DstReg) &&
630 RISCV::GPRRegClass.
contains(SrcReg)) {
631 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
633 .
addReg(SrcReg, KillFlag);
637 if (RISCV::GPRRegClass.
contains(DstReg) &&
638 RISCV::FPR64RegClass.
contains(SrcReg)) {
639 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
641 .
addReg(SrcReg, KillFlag);
647 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
658 Register SrcReg,
bool IsKill,
int FI,
667 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
668 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
670 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
671 Opcode = RISCV::SH_INX;
672 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
673 Opcode = RISCV::SW_INX;
674 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
675 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
676 Alignment >=
STI.getZilsdAlign()) {
677 Opcode = RISCV::SD_RV32;
679 Opcode = RISCV::PseudoRV32ZdinxSD;
681 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
683 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
685 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
687 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
688 Opcode = RISCV::VS1R_V;
689 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
690 Opcode = RISCV::VS2R_V;
691 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
692 Opcode = RISCV::VS4R_V;
693 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
694 Opcode = RISCV::VS8R_V;
695 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
696 Opcode = RISCV::PseudoVSPILL2_M1;
697 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
698 Opcode = RISCV::PseudoVSPILL2_M2;
699 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
700 Opcode = RISCV::PseudoVSPILL2_M4;
701 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
702 Opcode = RISCV::PseudoVSPILL3_M1;
703 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
704 Opcode = RISCV::PseudoVSPILL3_M2;
705 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
706 Opcode = RISCV::PseudoVSPILL4_M1;
707 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
708 Opcode = RISCV::PseudoVSPILL4_M2;
709 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
710 Opcode = RISCV::PseudoVSPILL5_M1;
711 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
712 Opcode = RISCV::PseudoVSPILL6_M1;
713 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
714 Opcode = RISCV::PseudoVSPILL7_M1;
715 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
716 Opcode = RISCV::PseudoVSPILL8_M1;
759 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
760 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
762 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
763 Opcode = RISCV::LH_INX;
764 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
765 Opcode = RISCV::LW_INX;
766 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
767 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
768 Alignment >=
STI.getZilsdAlign()) {
769 Opcode = RISCV::LD_RV32;
771 Opcode = RISCV::PseudoRV32ZdinxLD;
773 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
775 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
777 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
779 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
780 Opcode = RISCV::VL1RE8_V;
781 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
782 Opcode = RISCV::VL2RE8_V;
783 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
784 Opcode = RISCV::VL4RE8_V;
785 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
786 Opcode = RISCV::VL8RE8_V;
787 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
788 Opcode = RISCV::PseudoVRELOAD2_M1;
789 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
790 Opcode = RISCV::PseudoVRELOAD2_M2;
791 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
792 Opcode = RISCV::PseudoVRELOAD2_M4;
793 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
794 Opcode = RISCV::PseudoVRELOAD3_M1;
795 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
796 Opcode = RISCV::PseudoVRELOAD3_M2;
797 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
798 Opcode = RISCV::PseudoVRELOAD4_M1;
799 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
800 Opcode = RISCV::PseudoVRELOAD4_M2;
801 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
802 Opcode = RISCV::PseudoVRELOAD5_M1;
803 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
804 Opcode = RISCV::PseudoVRELOAD6_M1;
805 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
806 Opcode = RISCV::PseudoVRELOAD7_M1;
807 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
808 Opcode = RISCV::PseudoVRELOAD8_M1;
846 if (
Ops.size() != 1 ||
Ops[0] != 1)
849 switch (
MI.getOpcode()) {
851 if (RISCVInstrInfo::isSEXT_W(
MI))
853 if (RISCVInstrInfo::isZEXT_W(
MI))
855 if (RISCVInstrInfo::isZEXT_B(
MI))
862 case RISCV::ZEXT_H_RV32:
863 case RISCV::ZEXT_H_RV64:
870 case RISCV::VMV_X_S: {
873 if (ST.getXLen() < (1U << Log2SEW))
888 case RISCV::VFMV_F_S: {
915 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(*LoadOpc),
924 return RISCV::PseudoCCLB;
926 return RISCV::PseudoCCLBU;
928 return RISCV::PseudoCCLH;
930 return RISCV::PseudoCCLHU;
932 return RISCV::PseudoCCLW;
934 return RISCV::PseudoCCLWU;
936 return RISCV::PseudoCCLD;
938 return RISCV::PseudoCCQC_E_LB;
939 case RISCV::QC_E_LBU:
940 return RISCV::PseudoCCQC_E_LBU;
942 return RISCV::PseudoCCQC_E_LH;
943 case RISCV::QC_E_LHU:
944 return RISCV::PseudoCCQC_E_LHU;
946 return RISCV::PseudoCCQC_E_LW;
957 if (
MI.getOpcode() != RISCV::PseudoCCMOVGPR)
962 if (!
STI.hasShortForwardBranchILoad() || !PredOpc)
966 if (
Ops.size() != 1 || (
Ops[0] != 1 &&
Ops[0] != 2))
969 bool Invert =
Ops[0] == 2;
978 MI.getDebugLoc(),
get(PredOpc), DestReg);
989 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
995 NewMI.
add({
MI.getOperand(
MI.getNumExplicitOperands() - 2),
996 MI.getOperand(
MI.getNumExplicitOperands() - 1)});
1005 bool DstIsDead)
const {
1021 bool SrcRenamable =
false;
1025 bool LastItem = ++Num == Seq.
size();
1030 switch (Inst.getOpndKind()) {
1040 .
addReg(SrcReg, SrcRegState)
1047 .
addReg(SrcReg, SrcRegState)
1048 .
addReg(SrcReg, SrcRegState)
1054 .
addReg(SrcReg, SrcRegState)
1062 SrcRenamable = DstRenamable;
1072 case RISCV::CV_BEQIMM:
1073 case RISCV::QC_BEQI:
1074 case RISCV::QC_E_BEQI:
1075 case RISCV::NDS_BBC:
1076 case RISCV::NDS_BEQC:
1080 case RISCV::QC_BNEI:
1081 case RISCV::QC_E_BNEI:
1082 case RISCV::CV_BNEIMM:
1083 case RISCV::NDS_BBS:
1084 case RISCV::NDS_BNEC:
1087 case RISCV::QC_BLTI:
1088 case RISCV::QC_E_BLTI:
1091 case RISCV::QC_BGEI:
1092 case RISCV::QC_E_BGEI:
1095 case RISCV::QC_BLTUI:
1096 case RISCV::QC_E_BLTUI:
1099 case RISCV::QC_BGEUI:
1100 case RISCV::QC_E_BGEUI:
1132 "Unknown conditional branch");
1143 case RISCV::QC_MVEQ:
1144 return RISCV::QC_MVNE;
1145 case RISCV::QC_MVNE:
1146 return RISCV::QC_MVEQ;
1147 case RISCV::QC_MVLT:
1148 return RISCV::QC_MVGE;
1149 case RISCV::QC_MVGE:
1150 return RISCV::QC_MVLT;
1151 case RISCV::QC_MVLTU:
1152 return RISCV::QC_MVGEU;
1153 case RISCV::QC_MVGEU:
1154 return RISCV::QC_MVLTU;
1155 case RISCV::QC_MVEQI:
1156 return RISCV::QC_MVNEI;
1157 case RISCV::QC_MVNEI:
1158 return RISCV::QC_MVEQI;
1159 case RISCV::QC_MVLTI:
1160 return RISCV::QC_MVGEI;
1161 case RISCV::QC_MVGEI:
1162 return RISCV::QC_MVLTI;
1163 case RISCV::QC_MVLTUI:
1164 return RISCV::QC_MVGEUI;
1165 case RISCV::QC_MVGEUI:
1166 return RISCV::QC_MVLTUI;
1171 switch (SelectOpc) {
1190 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1200 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1205 return RISCV::CV_BEQIMM;
1207 return RISCV::CV_BNEIMM;
1210 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1215 return RISCV::QC_BEQI;
1217 return RISCV::QC_BNEI;
1219 return RISCV::QC_BLTI;
1221 return RISCV::QC_BGEI;
1224 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1229 return RISCV::QC_BLTUI;
1231 return RISCV::QC_BGEUI;
1234 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1239 return RISCV::QC_E_BEQI;
1241 return RISCV::QC_E_BNEI;
1243 return RISCV::QC_E_BLTI;
1245 return RISCV::QC_E_BGEI;
1248 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1253 return RISCV::QC_E_BLTUI;
1255 return RISCV::QC_E_BGEUI;
1258 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1263 return RISCV::NDS_BBC;
1265 return RISCV::NDS_BBS;
1268 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1273 return RISCV::NDS_BEQC;
1275 return RISCV::NDS_BNEC;
1317 case RISCV::QC_BEQI:
1318 return RISCV::QC_BNEI;
1319 case RISCV::QC_BNEI:
1320 return RISCV::QC_BEQI;
1321 case RISCV::QC_BLTI:
1322 return RISCV::QC_BGEI;
1323 case RISCV::QC_BGEI:
1324 return RISCV::QC_BLTI;
1325 case RISCV::QC_BLTUI:
1326 return RISCV::QC_BGEUI;
1327 case RISCV::QC_BGEUI:
1328 return RISCV::QC_BLTUI;
1329 case RISCV::QC_E_BEQI:
1330 return RISCV::QC_E_BNEI;
1331 case RISCV::QC_E_BNEI:
1332 return RISCV::QC_E_BEQI;
1333 case RISCV::QC_E_BLTI:
1334 return RISCV::QC_E_BGEI;
1335 case RISCV::QC_E_BGEI:
1336 return RISCV::QC_E_BLTI;
1337 case RISCV::QC_E_BLTUI:
1338 return RISCV::QC_E_BGEUI;
1339 case RISCV::QC_E_BGEUI:
1340 return RISCV::QC_E_BLTUI;
1348 bool AllowModify)
const {
1349 TBB = FBB =
nullptr;
1354 if (
I ==
MBB.end() || !isUnpredicatedTerminator(*
I))
1360 int NumTerminators = 0;
1361 for (
auto J =
I.getReverse(); J !=
MBB.rend() && isUnpredicatedTerminator(*J);
1364 if (J->getDesc().isUnconditionalBranch() ||
1365 J->getDesc().isIndirectBranch()) {
1372 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.end()) {
1373 while (std::next(FirstUncondOrIndirectBr) !=
MBB.end()) {
1374 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1377 I = FirstUncondOrIndirectBr;
1381 if (
I->getDesc().isIndirectBranch())
1385 if (
I->isPreISelOpcode())
1389 if (NumTerminators > 2)
1393 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
1399 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
1405 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
1406 I->getDesc().isUnconditionalBranch()) {
1417 int *BytesRemoved)
const {
1424 if (!
I->getDesc().isUnconditionalBranch() &&
1425 !
I->getDesc().isConditionalBranch())
1431 I->eraseFromParent();
1435 if (
I ==
MBB.begin())
1438 if (!
I->getDesc().isConditionalBranch())
1444 I->eraseFromParent();
1457 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1459 "RISC-V branch conditions have two components!");
1493 assert(RS &&
"RegScavenger required for long branching");
1495 "new block should be inserted for expanding unconditional branch");
1498 "restore block should be inserted for restoring clobbered registers");
1507 "Branch offsets outside of the signed 32-bit range not supported");
1513 auto II =
MBB.end();
1519 RS->enterBasicBlockEnd(
MBB);
1521 if (
STI.hasStdExtZicfilp())
1522 RC = &RISCV::GPRX7RegClass;
1524 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1528 RS->setRegUsed(TmpGPR);
1533 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1535 if (
STI.hasStdExtZicfilp())
1539 if (FrameIndex == -1)
1544 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1547 MI.getOperand(1).setMBB(&RestoreBB);
1551 TRI->eliminateFrameIndex(RestoreBB.
back(),
1561 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1566 Cond[0].setImm(RISCV::BNE);
1569 Cond[0].setImm(RISCV::BNEI);
1572 Cond[0].setImm(RISCV::BEQ);
1575 Cond[0].setImm(RISCV::BEQI);
1578 Cond[0].setImm(RISCV::BGE);
1581 Cond[0].setImm(RISCV::BLT);
1584 Cond[0].setImm(RISCV::BGEU);
1587 Cond[0].setImm(RISCV::BLTU);
1589 case RISCV::CV_BEQIMM:
1590 Cond[0].setImm(RISCV::CV_BNEIMM);
1592 case RISCV::CV_BNEIMM:
1593 Cond[0].setImm(RISCV::CV_BEQIMM);
1595 case RISCV::QC_BEQI:
1596 Cond[0].setImm(RISCV::QC_BNEI);
1598 case RISCV::QC_BNEI:
1599 Cond[0].setImm(RISCV::QC_BEQI);
1601 case RISCV::QC_BGEI:
1602 Cond[0].setImm(RISCV::QC_BLTI);
1604 case RISCV::QC_BLTI:
1605 Cond[0].setImm(RISCV::QC_BGEI);
1607 case RISCV::QC_BGEUI:
1608 Cond[0].setImm(RISCV::QC_BLTUI);
1610 case RISCV::QC_BLTUI:
1611 Cond[0].setImm(RISCV::QC_BGEUI);
1613 case RISCV::QC_E_BEQI:
1614 Cond[0].setImm(RISCV::QC_E_BNEI);
1616 case RISCV::QC_E_BNEI:
1617 Cond[0].setImm(RISCV::QC_E_BEQI);
1619 case RISCV::QC_E_BGEI:
1620 Cond[0].setImm(RISCV::QC_E_BLTI);
1622 case RISCV::QC_E_BLTI:
1623 Cond[0].setImm(RISCV::QC_E_BGEI);
1625 case RISCV::QC_E_BGEUI:
1626 Cond[0].setImm(RISCV::QC_E_BLTUI);
1628 case RISCV::QC_E_BLTUI:
1629 Cond[0].setImm(RISCV::QC_E_BGEUI);
1631 case RISCV::NDS_BBC:
1632 Cond[0].setImm(RISCV::NDS_BBS);
1634 case RISCV::NDS_BBS:
1635 Cond[0].setImm(RISCV::NDS_BBC);
1637 case RISCV::NDS_BEQC:
1638 Cond[0].setImm(RISCV::NDS_BNEC);
1640 case RISCV::NDS_BNEC:
1641 Cond[0].setImm(RISCV::NDS_BEQC);
1651 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1652 MI->getOperand(1).getReg() == RISCV::X0) {
1653 Imm =
MI->getOperand(2).getImm();
1658 if (
MI->getOpcode() == RISCV::BSETI &&
MI->getOperand(1).isReg() &&
1659 MI->getOperand(1).getReg() == RISCV::X0 &&
1660 MI->getOperand(2).getImm() == 11) {
1674 if (Reg == RISCV::X0) {
1682 bool IsSigned =
false;
1683 bool IsEquality =
false;
1684 switch (
MI.getOpcode()) {
1720 MI.eraseFromParent();
1746 auto searchConst = [&](int64_t C1) ->
Register {
1748 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1751 I.getOperand(0).getReg().isVirtual();
1754 return DefC1->getOperand(0).getReg();
1766 if (
isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&
1767 MRI.
hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1769 if (
Register RegZ = searchConst(C0 + 1)) {
1777 MI.eraseFromParent();
1787 if (
isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&
1790 if (
Register RegZ = searchConst(C0 - 1)) {
1798 MI.eraseFromParent();
1808 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1810 int NumOp =
MI.getNumExplicitOperands();
1811 return MI.getOperand(NumOp - 1).getMBB();
1815 int64_t BrOffset)
const {
1816 unsigned XLen =
STI.getXLen();
1823 case RISCV::NDS_BBC:
1824 case RISCV::NDS_BBS:
1825 case RISCV::NDS_BEQC:
1826 case RISCV::NDS_BNEC:
1836 case RISCV::CV_BEQIMM:
1837 case RISCV::CV_BNEIMM:
1838 case RISCV::QC_BEQI:
1839 case RISCV::QC_BNEI:
1840 case RISCV::QC_BGEI:
1841 case RISCV::QC_BLTI:
1842 case RISCV::QC_BLTUI:
1843 case RISCV::QC_BGEUI:
1844 case RISCV::QC_E_BEQI:
1845 case RISCV::QC_E_BNEI:
1846 case RISCV::QC_E_BGEI:
1847 case RISCV::QC_E_BLTI:
1848 case RISCV::QC_E_BLTUI:
1849 case RISCV::QC_E_BGEUI:
1852 case RISCV::PseudoBR:
1854 case RISCV::PseudoJump:
1865 case RISCV::ADD:
return RISCV::PseudoCCADD;
1866 case RISCV::SUB:
return RISCV::PseudoCCSUB;
1867 case RISCV::SLL:
return RISCV::PseudoCCSLL;
1868 case RISCV::SRL:
return RISCV::PseudoCCSRL;
1869 case RISCV::SRA:
return RISCV::PseudoCCSRA;
1870 case RISCV::AND:
return RISCV::PseudoCCAND;
1871 case RISCV::OR:
return RISCV::PseudoCCOR;
1872 case RISCV::XOR:
return RISCV::PseudoCCXOR;
1873 case RISCV::MAX:
return RISCV::PseudoCCMAX;
1874 case RISCV::MAXU:
return RISCV::PseudoCCMAXU;
1875 case RISCV::MIN:
return RISCV::PseudoCCMIN;
1876 case RISCV::MINU:
return RISCV::PseudoCCMINU;
1877 case RISCV::MUL:
return RISCV::PseudoCCMUL;
1878 case RISCV::LUI:
return RISCV::PseudoCCLUI;
1879 case RISCV::QC_LI:
return RISCV::PseudoCCQC_LI;
1880 case RISCV::QC_E_LI:
return RISCV::PseudoCCQC_E_LI;
1882 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
1883 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
1884 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
1885 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
1886 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
1887 case RISCV::ORI:
return RISCV::PseudoCCORI;
1888 case RISCV::XORI:
return RISCV::PseudoCCXORI;
1890 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
1891 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
1892 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
1893 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
1894 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
1896 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
1897 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
1898 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
1899 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
1901 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
1902 case RISCV::ORN:
return RISCV::PseudoCCORN;
1903 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
1905 case RISCV::NDS_BFOS:
return RISCV::PseudoCCNDS_BFOS;
1906 case RISCV::NDS_BFOZ:
return RISCV::PseudoCCNDS_BFOZ;
1910 return RISCV::INSTRUCTION_LIST_END;
1919 if (!
Reg.isVirtual())
1927 if (!STI.hasShortForwardBranchIMinMax() &&
1928 (
MI->getOpcode() == RISCV::MAX ||
MI->getOpcode() == RISCV::MIN ||
1929 MI->getOpcode() == RISCV::MINU ||
MI->getOpcode() == RISCV::MAXU))
1932 if (!STI.hasShortForwardBranchIMul() &&
MI->getOpcode() == RISCV::MUL)
1939 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1940 MI->getOperand(1).getReg() == RISCV::X0)
1945 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1958 bool DontMoveAcrossStores =
true;
1959 if (!
MI->isSafeToMove(DontMoveAcrossStores))
1967 bool PreferFalse)
const {
1968 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1969 "Unknown select instruction");
1970 if (!
STI.hasShortForwardBranchIALU())
1976 bool Invert = !
DefMI;
1984 Register DestReg =
MI.getOperand(0).getReg();
1990 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1997 NewMI.
add(FalseReg);
2005 unsigned BCCOpcode =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
2011 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 2));
2012 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 1));
2022 if (
DefMI->getParent() !=
MI.getParent())
2026 DefMI->eraseFromParent();
2031 if (
MI.isMetaInstruction())
2034 unsigned Opcode =
MI.getOpcode();
2036 if (Opcode == TargetOpcode::INLINEASM ||
2037 Opcode == TargetOpcode::INLINEASM_BR) {
2039 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
2044 if (
STI.hasStdExtZca()) {
2045 if (isCompressibleInst(
MI,
STI))
2052 if (Opcode == TargetOpcode::BUNDLE)
2053 return getInstBundleLength(
MI);
2055 if (
MI.getParent() &&
MI.getParent()->getParent()) {
2056 if (isCompressibleInst(
MI,
STI))
2061 case RISCV::PseudoMV_FPR16INX:
2062 case RISCV::PseudoMV_FPR32INX:
2064 return STI.hasStdExtZca() ? 2 : 4;
2066 case RISCV::PseudoCCMOVGPRNoX0:
2067 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2070 case RISCV::PseudoCCMOVGPR:
2071 case RISCV::PseudoCCADD:
2072 case RISCV::PseudoCCSUB:
2073 case RISCV::PseudoCCSLL:
2074 case RISCV::PseudoCCSRL:
2075 case RISCV::PseudoCCSRA:
2076 case RISCV::PseudoCCAND:
2077 case RISCV::PseudoCCOR:
2078 case RISCV::PseudoCCXOR:
2079 case RISCV::PseudoCCADDI:
2080 case RISCV::PseudoCCANDI:
2081 case RISCV::PseudoCCORI:
2082 case RISCV::PseudoCCXORI:
2083 case RISCV::PseudoCCLUI:
2084 case RISCV::PseudoCCSLLI:
2085 case RISCV::PseudoCCSRLI:
2086 case RISCV::PseudoCCSRAI:
2087 case RISCV::PseudoCCADDW:
2088 case RISCV::PseudoCCSUBW:
2089 case RISCV::PseudoCCSLLW:
2090 case RISCV::PseudoCCSRLW:
2091 case RISCV::PseudoCCSRAW:
2092 case RISCV::PseudoCCADDIW:
2093 case RISCV::PseudoCCSLLIW:
2094 case RISCV::PseudoCCSRLIW:
2095 case RISCV::PseudoCCSRAIW:
2096 case RISCV::PseudoCCANDN:
2097 case RISCV::PseudoCCORN:
2098 case RISCV::PseudoCCXNOR:
2099 case RISCV::PseudoCCMAX:
2100 case RISCV::PseudoCCMIN:
2101 case RISCV::PseudoCCMAXU:
2102 case RISCV::PseudoCCMINU:
2103 case RISCV::PseudoCCMUL:
2104 case RISCV::PseudoCCLB:
2105 case RISCV::PseudoCCLH:
2106 case RISCV::PseudoCCLW:
2107 case RISCV::PseudoCCLHU:
2108 case RISCV::PseudoCCLBU:
2109 case RISCV::PseudoCCLWU:
2110 case RISCV::PseudoCCLD:
2111 case RISCV::PseudoCCQC_LI:
2112 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2115 case RISCV::PseudoCCQC_E_LI:
2116 case RISCV::PseudoCCQC_E_LB:
2117 case RISCV::PseudoCCQC_E_LH:
2118 case RISCV::PseudoCCQC_E_LW:
2119 case RISCV::PseudoCCQC_E_LHU:
2120 case RISCV::PseudoCCQC_E_LBU:
2121 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2124 case TargetOpcode::STACKMAP:
2127 case TargetOpcode::PATCHPOINT:
2130 case TargetOpcode::STATEPOINT: {
2134 return std::max(NumBytes, 8U);
2136 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2137 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
2138 case TargetOpcode::PATCHABLE_TAIL_CALL: {
2141 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
2142 F.hasFnAttribute(
"patchable-function-entry")) {
2144 if (
F.getFnAttribute(
"patchable-function-entry")
2146 .getAsInteger(10, Num))
2147 return get(Opcode).getSize();
2150 return (
STI.hasStdExtZca() ? 2 : 4) * Num;
2154 return STI.is64Bit() ? 68 : 44;
2157 return get(Opcode).getSize();
2161unsigned RISCVInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
2165 while (++
I != E &&
I->isInsideBundle()) {
2166 assert(!
I->isBundle() &&
"No nested bundle!");
2173 const unsigned Opcode =
MI.getOpcode();
2177 case RISCV::FSGNJ_D:
2178 case RISCV::FSGNJ_S:
2179 case RISCV::FSGNJ_H:
2180 case RISCV::FSGNJ_D_INX:
2181 case RISCV::FSGNJ_D_IN32X:
2182 case RISCV::FSGNJ_S_INX:
2183 case RISCV::FSGNJ_H_INX:
2185 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2186 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
2190 return (
MI.getOperand(1).isReg() &&
2191 MI.getOperand(1).getReg() == RISCV::X0) ||
2192 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
2194 return MI.isAsCheapAsAMove();
2197std::optional<DestSourcePair>
2201 switch (
MI.getOpcode()) {
2207 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2208 MI.getOperand(2).isReg())
2210 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2211 MI.getOperand(1).isReg())
2216 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
2217 MI.getOperand(2).getImm() == 0)
2221 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2222 MI.getOperand(1).isReg())
2226 case RISCV::SH1ADD_UW:
2228 case RISCV::SH2ADD_UW:
2230 case RISCV::SH3ADD_UW:
2231 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2232 MI.getOperand(2).isReg())
2235 case RISCV::FSGNJ_D:
2236 case RISCV::FSGNJ_S:
2237 case RISCV::FSGNJ_H:
2238 case RISCV::FSGNJ_D_INX:
2239 case RISCV::FSGNJ_D_IN32X:
2240 case RISCV::FSGNJ_S_INX:
2241 case RISCV::FSGNJ_H_INX:
2243 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2244 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2248 return std::nullopt;
2256 const auto &SchedModel =
STI.getSchedModel();
2257 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2269 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2273 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2274 RISCV::OpName::frm) < 0;
2276 "New instructions require FRM whereas the old one does not have it");
2283 for (
auto *NewMI : InsInstrs) {
2285 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2286 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2328bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2329 bool Invert)
const {
2330#define OPCODE_LMUL_CASE(OPC) \
2331 case RISCV::OPC##_M1: \
2332 case RISCV::OPC##_M2: \
2333 case RISCV::OPC##_M4: \
2334 case RISCV::OPC##_M8: \
2335 case RISCV::OPC##_MF2: \
2336 case RISCV::OPC##_MF4: \
2337 case RISCV::OPC##_MF8
2339#define OPCODE_LMUL_MASK_CASE(OPC) \
2340 case RISCV::OPC##_M1_MASK: \
2341 case RISCV::OPC##_M2_MASK: \
2342 case RISCV::OPC##_M4_MASK: \
2343 case RISCV::OPC##_M8_MASK: \
2344 case RISCV::OPC##_MF2_MASK: \
2345 case RISCV::OPC##_MF4_MASK: \
2346 case RISCV::OPC##_MF8_MASK
2351 Opcode = *InvOpcode;
2368#undef OPCODE_LMUL_MASK_CASE
2369#undef OPCODE_LMUL_CASE
2372bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2383 const uint64_t TSFlags =
Desc.TSFlags;
2385 auto checkImmOperand = [&](
unsigned OpIdx) {
2389 auto checkRegOperand = [&](
unsigned OpIdx) {
2397 if (!checkRegOperand(1))
2412 bool SeenMI2 =
false;
2413 for (
auto End =
MBB->
rend(), It = It1; It != End; ++It) {
2422 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2423 Register SrcReg = It->getOperand(1).getReg();
2441 if (MI1VReg != SrcReg)
2450 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2490bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2491 bool &Commuted)
const {
2495 "Expect the present of passthrough operand.");
2501 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2502 areRVVInstsReassociable(Inst, *MI2);
2506 return areRVVInstsReassociable(Inst, *MI1) &&
2507 (isVectorAssociativeAndCommutative(*MI1) ||
2508 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2515 if (!isVectorAssociativeAndCommutative(Inst) &&
2516 !isVectorAssociativeAndCommutative(Inst,
true))
2542 for (
unsigned I = 0;
I < 5; ++
I)
2548 bool &Commuted)
const {
2549 if (isVectorAssociativeAndCommutative(Inst) ||
2550 isVectorAssociativeAndCommutative(Inst,
true))
2551 return hasReassociableVectorSibling(Inst, Commuted);
2557 unsigned OperandIdx = Commuted ? 2 : 1;
2561 int16_t InstFrmOpIdx =
2562 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2563 int16_t SiblingFrmOpIdx =
2564 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2566 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2571 bool Invert)
const {
2572 if (isVectorAssociativeAndCommutative(Inst, Invert))
2580 Opc = *InverseOpcode;
2625std::optional<unsigned>
2627#define RVV_OPC_LMUL_CASE(OPC, INV) \
2628 case RISCV::OPC##_M1: \
2629 return RISCV::INV##_M1; \
2630 case RISCV::OPC##_M2: \
2631 return RISCV::INV##_M2; \
2632 case RISCV::OPC##_M4: \
2633 return RISCV::INV##_M4; \
2634 case RISCV::OPC##_M8: \
2635 return RISCV::INV##_M8; \
2636 case RISCV::OPC##_MF2: \
2637 return RISCV::INV##_MF2; \
2638 case RISCV::OPC##_MF4: \
2639 return RISCV::INV##_MF4; \
2640 case RISCV::OPC##_MF8: \
2641 return RISCV::INV##_MF8
2643#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2644 case RISCV::OPC##_M1_MASK: \
2645 return RISCV::INV##_M1_MASK; \
2646 case RISCV::OPC##_M2_MASK: \
2647 return RISCV::INV##_M2_MASK; \
2648 case RISCV::OPC##_M4_MASK: \
2649 return RISCV::INV##_M4_MASK; \
2650 case RISCV::OPC##_M8_MASK: \
2651 return RISCV::INV##_M8_MASK; \
2652 case RISCV::OPC##_MF2_MASK: \
2653 return RISCV::INV##_MF2_MASK; \
2654 case RISCV::OPC##_MF4_MASK: \
2655 return RISCV::INV##_MF4_MASK; \
2656 case RISCV::OPC##_MF8_MASK: \
2657 return RISCV::INV##_MF8_MASK
2661 return std::nullopt;
2663 return RISCV::FSUB_H;
2665 return RISCV::FSUB_S;
2667 return RISCV::FSUB_D;
2669 return RISCV::FADD_H;
2671 return RISCV::FADD_S;
2673 return RISCV::FADD_D;
2690#undef RVV_OPC_LMUL_MASK_CASE
2691#undef RVV_OPC_LMUL_CASE
2696 bool DoRegPressureReduce) {
2723 bool DoRegPressureReduce) {
2730 DoRegPressureReduce)) {
2736 DoRegPressureReduce)) {
2746 bool DoRegPressureReduce) {
2754 unsigned CombineOpc) {
2761 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2775 unsigned OuterShiftAmt) {
2781 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2808 case RISCV::SH1ADD_UW:
2810 case RISCV::SH2ADD_UW:
2812 case RISCV::SH3ADD_UW:
2858 bool DoRegPressureReduce)
const {
2867 DoRegPressureReduce);
2875 return RISCV::FMADD_H;
2877 return RISCV::FMADD_S;
2879 return RISCV::FMADD_D;
2924 bool Mul1IsKill = Mul1.
isKill();
2925 bool Mul2IsKill = Mul2.
isKill();
2926 bool AddendIsKill = Addend.
isKill();
2935 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2960 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2967 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2970 switch (InnerShiftAmt - OuterShiftAmt) {
2974 InnerOpc = RISCV::ADD;
2977 InnerOpc = RISCV::SH1ADD;
2980 InnerOpc = RISCV::SH2ADD;
2983 InnerOpc = RISCV::SH3ADD;
3001 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
3018 DelInstrs, InstrIdxForVirtReg);
3045 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
3047 unsigned OpType = Operand.OperandType;
3053 ErrInfo =
"Expected an immediate operand.";
3056 int64_t Imm = MO.
getImm();
3062#define CASE_OPERAND_UIMM(NUM) \
3063 case RISCVOp::OPERAND_UIMM##NUM: \
3064 Ok = isUInt<NUM>(Imm); \
3066#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX) \
3067 case RISCVOp::OPERAND_UIMM##BITS##_LSB##SUFFIX: { \
3068 constexpr size_t NumZeros = sizeof(#SUFFIX) - 1; \
3069 Ok = isShiftedUInt<BITS - NumZeros, NumZeros>(Imm); \
3072#define CASE_OPERAND_SIMM(NUM) \
3073 case RISCVOp::OPERAND_SIMM##NUM: \
3074 Ok = isInt<NUM>(Imm); \
3108 Ok = Imm >= 1 && Imm <= 32;
3129 Ok = (
isUInt<5>(Imm) && Imm != 0) || Imm == -1;
3140 Ok = Imm >= -15 && Imm <= 16;
3168 Ok = Ok && Imm != 0;
3171 Ok = (
isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
3174 Ok = Imm >= 0 && Imm <= 10;
3177 Ok = Imm >= 0 && Imm <= 7;
3180 Ok = Imm >= 1 && Imm <= 10;
3183 Ok = Imm >= 2 && Imm <= 14;
3192 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3227 Ok = Imm == 1 || Imm == 2 || Imm == 4;
3231 ErrInfo =
"Invalid immediate";
3240 ErrInfo =
"Expected a non-register operand.";
3244 ErrInfo =
"Invalid immediate";
3253 ErrInfo =
"Expected a non-register operand.";
3257 ErrInfo =
"Invalid immediate";
3265 ErrInfo =
"Expected a non-register operand.";
3269 ErrInfo =
"Invalid immediate";
3275 int64_t Imm = MO.
getImm();
3278 ErrInfo =
"Invalid immediate";
3281 }
else if (!MO.
isReg()) {
3282 ErrInfo =
"Expected a register or immediate operand.";
3288 ErrInfo =
"Expected a register or immediate operand.";
3298 if (!
Op.isImm() && !
Op.isReg()) {
3299 ErrInfo =
"Invalid operand type for VL operand";
3302 if (
Op.isReg() &&
Op.getReg().isValid()) {
3305 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3306 ErrInfo =
"Invalid register class for VL operand";
3311 ErrInfo =
"VL operand w/o SEW operand?";
3317 if (!
MI.getOperand(
OpIdx).isImm()) {
3318 ErrInfo =
"SEW value expected to be an immediate";
3323 ErrInfo =
"Unexpected SEW value";
3326 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3328 ErrInfo =
"Unexpected SEW value";
3334 if (!
MI.getOperand(
OpIdx).isImm()) {
3335 ErrInfo =
"Policy operand expected to be an immediate";
3340 ErrInfo =
"Invalid Policy Value";
3344 ErrInfo =
"policy operand w/o VL operand?";
3352 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3353 ErrInfo =
"policy operand w/o tied operand?";
3360 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3361 ErrInfo =
"dynamic rounding mode should read FRM";
3383 case RISCV::LD_RV32:
3393 case RISCV::SD_RV32:
3409 int64_t NewOffset = OldOffset + Disp;
3431 "Addressing mode not supported for folding");
3504 case RISCV::LD_RV32:
3507 case RISCV::SD_RV32:
3514 OffsetIsScalable =
false;
3530 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3538 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3541 auto Base1 = MO1->getValue();
3542 auto Base2 = MO2->getValue();
3543 if (!Base1 || !Base2)
3551 return Base1 == Base2;
3557 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3558 unsigned NumBytes)
const {
3561 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3566 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3572 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3578 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3628 int64_t OffsetA = 0, OffsetB = 0;
3634 int LowOffset = std::min(OffsetA, OffsetB);
3635 int HighOffset = std::max(OffsetA, OffsetB);
3636 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3638 LowOffset + (
int)LowWidth.
getValue() <= HighOffset)
3645std::pair<unsigned, unsigned>
3648 return std::make_pair(TF & Mask, TF & ~Mask);
3654 static const std::pair<unsigned, const char *> TargetFlags[] = {
3655 {MO_CALL,
"riscv-call"},
3656 {MO_LO,
"riscv-lo"},
3657 {MO_HI,
"riscv-hi"},
3658 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3659 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3660 {MO_GOT_HI,
"riscv-got-hi"},
3661 {MO_TPREL_LO,
"riscv-tprel-lo"},
3662 {MO_TPREL_HI,
"riscv-tprel-hi"},
3663 {MO_TPREL_ADD,
"riscv-tprel-add"},
3664 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3665 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3666 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3667 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3668 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3669 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3677 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3690 unsigned &Flags)
const {
3709 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3710 F.hasFnAttribute(
"patchable-function-entry");
3715 return MI.readsRegister(RegNo,
TRI) ||
3716 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3721 return MI.modifiesRegister(RegNo,
TRI) ||
3722 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3726 if (!
MBB.back().isReturn())
3752 if (
C.back().isReturn() &&
3753 !
C.isAvailableAcrossAndOutOfSeq(TailExpandUseReg, RegInfo)) {
3755 LLVM_DEBUG(
dbgs() <<
"Cannot be outlined between: " <<
C.front() <<
"and "
3757 LLVM_DEBUG(
dbgs() <<
"Because the tail-call register is live across "
3758 "the proposed outlined function call\n");
3764 if (
C.back().isReturn()) {
3766 "The candidate who uses return instruction must be outlined "
3778 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo);
3781std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3784 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3785 unsigned MinRepeats)
const {
3793 if (RepeatedSequenceLocs.size() < MinRepeats)
3794 return std::nullopt;
3798 unsigned InstrSizeCExt =
3800 unsigned CallOverhead = 0, FrameOverhead = 0;
3803 unsigned CFICount = 0;
3804 for (
auto &
I : Candidate) {
3805 if (
I.isCFIInstruction())
3816 std::vector<MCCFIInstruction> CFIInstructions =
3817 C.getMF()->getFrameInstructions();
3819 if (CFICount > 0 && CFICount != CFIInstructions.size())
3820 return std::nullopt;
3828 CallOverhead = 4 + InstrSizeCExt;
3835 FrameOverhead = InstrSizeCExt;
3841 return std::nullopt;
3843 for (
auto &
C : RepeatedSequenceLocs)
3844 C.setCallInfo(MOCI, CallOverhead);
3846 unsigned SequenceSize = 0;
3847 for (
auto &
MI : Candidate)
3850 return std::make_unique<outliner::OutlinedFunction>(
3851 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3857 unsigned Flags)
const {
3861 MBB->getParent()->getSubtarget().getRegisterInfo();
3862 const auto &
F =
MI.getMF()->getFunction();
3867 if (
MI.isCFIInstruction())
3875 for (
const auto &MO :
MI.operands()) {
3880 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3881 F.hasSection() ||
F.getSectionPrefix()))
3898 MBB.addLiveIn(RISCV::X5);
3913 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3921 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3932 return std::nullopt;
3936 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3937 MI.getOperand(2).isImm())
3938 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3940 return std::nullopt;
3948 std::string GenericComment =
3950 if (!GenericComment.empty())
3951 return GenericComment;
3955 return std::string();
3957 std::string Comment;
3964 switch (OpInfo.OperandType) {
3967 unsigned Imm =
Op.getImm();
3972 unsigned Imm =
Op.getImm();
3977 unsigned Imm =
Op.getImm();
3983 unsigned Log2SEW =
Op.getImm();
3984 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3990 unsigned Policy =
Op.getImm();
3992 "Invalid Policy Value");
3998 if (
Op.isImm() &&
Op.getImm() == -1)
4020#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
4021 RISCV::Pseudo##OP##_##LMUL
4023#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
4024 RISCV::Pseudo##OP##_##LMUL##_MASK
4026#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
4027 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
4028 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
4030#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
4031 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
4032 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
4033 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
4034 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
4035 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
4036 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
4038#define CASE_RVV_OPCODE_UNMASK(OP) \
4039 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
4040 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
4042#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
4043 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
4044 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
4045 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
4046 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
4047 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
4048 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
4050#define CASE_RVV_OPCODE_MASK(OP) \
4051 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
4052 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
4054#define CASE_RVV_OPCODE_WIDEN(OP) \
4055 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
4056 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
4058#define CASE_RVV_OPCODE(OP) \
4059 CASE_RVV_OPCODE_UNMASK(OP): \
4060 case CASE_RVV_OPCODE_MASK(OP)
4064#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
4065 RISCV::PseudoV##OP##_##TYPE##_##LMUL
4067#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
4068 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
4069 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
4070 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
4071 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
4072 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
4073 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
4074 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
4077#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
4078 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
4080#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
4081 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
4082 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
4083 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
4084 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
4086#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
4087 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
4088 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
4090#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
4091 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
4092 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
4094#define CASE_VFMA_OPCODE_VV(OP) \
4095 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
4096 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
4097 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
4098 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
4100#define CASE_VFMA_SPLATS(OP) \
4101 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
4102 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
4103 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
4104 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
4108 unsigned &SrcOpIdx1,
4109 unsigned &SrcOpIdx2)
const {
4111 if (!
Desc.isCommutable())
4114 switch (
MI.getOpcode()) {
4115 case RISCV::TH_MVEQZ:
4116 case RISCV::TH_MVNEZ:
4120 if (
MI.getOperand(2).getReg() == RISCV::X0)
4123 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4124 case RISCV::QC_SELECTIEQ:
4125 case RISCV::QC_SELECTINE:
4126 case RISCV::QC_SELECTIIEQ:
4127 case RISCV::QC_SELECTIINE:
4128 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4129 case RISCV::QC_MVEQ:
4130 case RISCV::QC_MVNE:
4131 case RISCV::QC_MVLT:
4132 case RISCV::QC_MVGE:
4133 case RISCV::QC_MVLTU:
4134 case RISCV::QC_MVGEU:
4135 case RISCV::QC_MVEQI:
4136 case RISCV::QC_MVNEI:
4137 case RISCV::QC_MVLTI:
4138 case RISCV::QC_MVGEI:
4139 case RISCV::QC_MVLTUI:
4140 case RISCV::QC_MVGEUI:
4141 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
4142 case RISCV::TH_MULA:
4143 case RISCV::TH_MULAW:
4144 case RISCV::TH_MULAH:
4145 case RISCV::TH_MULS:
4146 case RISCV::TH_MULSW:
4147 case RISCV::TH_MULSH:
4149 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4150 case RISCV::PseudoCCMOVGPRNoX0:
4151 case RISCV::PseudoCCMOVGPR:
4153 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4184 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4211 unsigned CommutableOpIdx1 = 1;
4212 unsigned CommutableOpIdx2 = 3;
4213 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4234 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4236 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4240 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4241 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4247 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4248 SrcOpIdx2 == CommuteAnyOperandIndex) {
4251 unsigned CommutableOpIdx1 = SrcOpIdx1;
4252 if (SrcOpIdx1 == SrcOpIdx2) {
4255 CommutableOpIdx1 = 1;
4256 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4258 CommutableOpIdx1 = SrcOpIdx2;
4263 unsigned CommutableOpIdx2;
4264 if (CommutableOpIdx1 != 1) {
4266 CommutableOpIdx2 = 1;
4268 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
4273 if (Op1Reg !=
MI.getOperand(2).getReg())
4274 CommutableOpIdx2 = 2;
4276 CommutableOpIdx2 = 3;
4281 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4294#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4295 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4296 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4299#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4300 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4301 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4302 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4303 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4304 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4305 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4306 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4309#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4310 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4311 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4314#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4315 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4316 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4317 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4318 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4320#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4321 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4322 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4324#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4325 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4326 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4328#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4329 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4330 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4331 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4332 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4334#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4335 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4336 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4337 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4338 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4344 unsigned OpIdx2)
const {
4347 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
4351 switch (
MI.getOpcode()) {
4352 case RISCV::TH_MVEQZ:
4353 case RISCV::TH_MVNEZ: {
4354 auto &WorkingMI = cloneIfNew(
MI);
4355 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4356 : RISCV::TH_MVEQZ));
4360 case RISCV::QC_SELECTIEQ:
4361 case RISCV::QC_SELECTINE:
4362 case RISCV::QC_SELECTIIEQ:
4363 case RISCV::QC_SELECTIINE:
4365 case RISCV::QC_MVEQ:
4366 case RISCV::QC_MVNE:
4367 case RISCV::QC_MVLT:
4368 case RISCV::QC_MVGE:
4369 case RISCV::QC_MVLTU:
4370 case RISCV::QC_MVGEU:
4371 case RISCV::QC_MVEQI:
4372 case RISCV::QC_MVNEI:
4373 case RISCV::QC_MVLTI:
4374 case RISCV::QC_MVGEI:
4375 case RISCV::QC_MVLTUI:
4376 case RISCV::QC_MVGEUI: {
4377 auto &WorkingMI = cloneIfNew(
MI);
4382 case RISCV::PseudoCCMOVGPRNoX0:
4383 case RISCV::PseudoCCMOVGPR: {
4385 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
4387 auto &WorkingMI = cloneIfNew(
MI);
4388 WorkingMI.getOperand(
MI.getNumExplicitOperands() - 3).setImm(BCC);
4412 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4413 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
4415 switch (
MI.getOpcode()) {
4438 auto &WorkingMI = cloneIfNew(
MI);
4439 WorkingMI.setDesc(
get(
Opc));
4449 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4452 if (OpIdx1 == 3 || OpIdx2 == 3) {
4454 switch (
MI.getOpcode()) {
4465 auto &WorkingMI = cloneIfNew(
MI);
4466 WorkingMI.setDesc(
get(
Opc));
4478#undef CASE_VMA_CHANGE_OPCODE_COMMON
4479#undef CASE_VMA_CHANGE_OPCODE_LMULS
4480#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4481#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4482#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4483#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4484#undef CASE_VFMA_CHANGE_OPCODE_VV
4485#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4487#undef CASE_RVV_OPCODE_UNMASK_LMUL
4488#undef CASE_RVV_OPCODE_MASK_LMUL
4489#undef CASE_RVV_OPCODE_LMUL
4490#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4491#undef CASE_RVV_OPCODE_UNMASK
4492#undef CASE_RVV_OPCODE_MASK_WIDEN
4493#undef CASE_RVV_OPCODE_MASK
4494#undef CASE_RVV_OPCODE_WIDEN
4495#undef CASE_RVV_OPCODE
4497#undef CASE_VMA_OPCODE_COMMON
4498#undef CASE_VMA_OPCODE_LMULS
4499#undef CASE_VFMA_OPCODE_COMMON
4500#undef CASE_VFMA_OPCODE_LMULS_M1
4501#undef CASE_VFMA_OPCODE_LMULS_MF2
4502#undef CASE_VFMA_OPCODE_LMULS_MF4
4503#undef CASE_VFMA_OPCODE_VV
4504#undef CASE_VFMA_SPLATS
4507 switch (
MI.getOpcode()) {
4515 if (
MI.getOperand(1).getReg() == RISCV::X0)
4516 commuteInstruction(
MI);
4518 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4519 MI.getOperand(2).ChangeToImmediate(0);
4520 MI.setDesc(
get(RISCV::ADDI));
4524 if (
MI.getOpcode() == RISCV::XOR &&
4525 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4526 MI.getOperand(1).setReg(RISCV::X0);
4527 MI.getOperand(2).ChangeToImmediate(0);
4528 MI.setDesc(
get(RISCV::ADDI));
4535 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4536 MI.setDesc(
get(RISCV::ADDI));
4542 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4543 MI.getOperand(2).ChangeToImmediate(0);
4544 MI.setDesc(
get(RISCV::ADDI));
4550 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4551 MI.getOperand(2).ChangeToImmediate(0);
4552 MI.setDesc(
get(RISCV::ADDIW));
4559 if (
MI.getOperand(1).getReg() == RISCV::X0)
4560 commuteInstruction(
MI);
4562 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4563 MI.getOperand(2).ChangeToImmediate(0);
4564 MI.setDesc(
get(RISCV::ADDIW));
4569 case RISCV::SH1ADD_UW:
4571 case RISCV::SH2ADD_UW:
4573 case RISCV::SH3ADD_UW:
4575 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4576 MI.removeOperand(1);
4578 MI.setDesc(
get(RISCV::ADDI));
4582 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4583 MI.removeOperand(2);
4584 unsigned Opc =
MI.getOpcode();
4585 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4586 Opc == RISCV::SH3ADD_UW) {
4588 MI.setDesc(
get(RISCV::SLLI_UW));
4592 MI.setDesc(
get(RISCV::SLLI));
4606 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4607 MI.getOperand(2).getReg() == RISCV::X0) {
4608 MI.getOperand(1).setReg(RISCV::X0);
4609 MI.getOperand(2).ChangeToImmediate(0);
4610 MI.setDesc(
get(RISCV::ADDI));
4616 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4617 MI.getOperand(2).setImm(0);
4618 MI.setDesc(
get(RISCV::ADDI));
4626 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4627 MI.getOperand(2).ChangeToImmediate(0);
4628 MI.setDesc(
get(RISCV::ADDI));
4632 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4633 MI.getOperand(2).ChangeToImmediate(0);
4634 MI.setDesc(
get(RISCV::ADDI));
4642 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4643 MI.getOperand(2).ChangeToImmediate(0);
4644 MI.setDesc(
get(RISCV::ADDI));
4654 case RISCV::SLLI_UW:
4656 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4657 MI.getOperand(2).setImm(0);
4658 MI.setDesc(
get(RISCV::ADDI));
4666 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4667 MI.getOperand(2).getReg() == RISCV::X0) {
4668 MI.getOperand(2).ChangeToImmediate(0);
4669 MI.setDesc(
get(RISCV::ADDI));
4673 if (
MI.getOpcode() == RISCV::ADD_UW &&
4674 MI.getOperand(1).getReg() == RISCV::X0) {
4675 MI.removeOperand(1);
4677 MI.setDesc(
get(RISCV::ADDI));
4683 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4684 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4685 MI.setDesc(
get(RISCV::ADDI));
4691 case RISCV::ZEXT_H_RV32:
4692 case RISCV::ZEXT_H_RV64:
4695 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4697 MI.setDesc(
get(RISCV::ADDI));
4706 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4707 MI.getOperand(2).ChangeToImmediate(0);
4708 MI.setDesc(
get(RISCV::ADDI));
4715 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4717 MI.removeOperand(0);
4718 MI.insert(
MI.operands_begin() + 1, {MO0});
4723 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4725 MI.removeOperand(0);
4726 MI.insert(
MI.operands_begin() + 1, {MO0});
4727 MI.setDesc(
get(RISCV::BNE));
4732 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4734 MI.removeOperand(0);
4735 MI.insert(
MI.operands_begin() + 1, {MO0});
4736 MI.setDesc(
get(RISCV::BEQ));
4744#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4745 RISCV::PseudoV##OP##_##LMUL##_TIED
4747#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4748 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4749 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4750 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4751 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4752 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4753 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4755#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4756 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4757 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4760#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4761 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4762 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4763 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4764 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4765 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4766 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4769#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4770 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4772#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4773 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4774 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4775 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4776 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4777 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4778 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4779 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4780 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4781 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4783#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4784 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4785 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4788#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4789 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4790 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4791 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4792 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4793 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4794 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4795 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4796 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4797 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4799#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4800 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4801 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4802 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4803 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4804 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4806#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4807 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4808 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4809 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4810 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4811 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4818 switch (
MI.getOpcode()) {
4826 MI.getNumExplicitOperands() == 7 &&
4827 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4834 switch (
MI.getOpcode()) {
4846 .
add(
MI.getOperand(0))
4848 .
add(
MI.getOperand(1))
4849 .
add(
MI.getOperand(2))
4850 .
add(
MI.getOperand(3))
4851 .
add(
MI.getOperand(4))
4852 .
add(
MI.getOperand(5))
4853 .
add(
MI.getOperand(6));
4862 MI.getNumExplicitOperands() == 6);
4869 switch (
MI.getOpcode()) {
4881 .
add(
MI.getOperand(0))
4883 .
add(
MI.getOperand(1))
4884 .
add(
MI.getOperand(2))
4885 .
add(
MI.getOperand(3))
4886 .
add(
MI.getOperand(4))
4887 .
add(
MI.getOperand(5));
4894 unsigned NumOps =
MI.getNumOperands();
4897 if (
Op.isReg() &&
Op.isKill())
4905 if (
MI.getOperand(0).isEarlyClobber()) {
4919#undef CASE_WIDEOP_OPCODE_COMMON
4920#undef CASE_WIDEOP_OPCODE_LMULS
4921#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4922#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4923#undef CASE_FP_WIDEOP_OPCODE_COMMON
4924#undef CASE_FP_WIDEOP_OPCODE_LMULS
4925#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4926#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4935 if (ShiftAmount == 0)
4941 }
else if (
int ShXAmount, ShiftAmount;
4943 (ShXAmount =
isShifted359(Amount, ShiftAmount)) != 0) {
4946 switch (ShXAmount) {
4948 Opc = RISCV::SH1ADD;
4951 Opc = RISCV::SH2ADD;
4954 Opc = RISCV::SH3ADD;
4990 }
else if (
STI.hasStdExtZmmul()) {
5000 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
5001 if (Amount & (1U << ShiftAmount)) {
5005 .
addImm(ShiftAmount - PrevShiftAmount)
5007 if (Amount >> (ShiftAmount + 1)) {
5021 PrevShiftAmount = ShiftAmount;
5024 assert(Acc &&
"Expected valid accumulator");
5034 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
5042 ?
STI.getTailDupAggressiveThreshold()
5049 unsigned Opcode =
MI.getOpcode();
5050 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
5059 return MI.isCopy() &&
MI.getOperand(0).getReg().isPhysical() &&
5061 TRI->getMinimalPhysRegClass(
MI.getOperand(0).getReg()));
5064std::optional<std::pair<unsigned, unsigned>>
5068 return std::nullopt;
5069 case RISCV::PseudoVSPILL2_M1:
5070 case RISCV::PseudoVRELOAD2_M1:
5071 return std::make_pair(2u, 1u);
5072 case RISCV::PseudoVSPILL2_M2:
5073 case RISCV::PseudoVRELOAD2_M2:
5074 return std::make_pair(2u, 2u);
5075 case RISCV::PseudoVSPILL2_M4:
5076 case RISCV::PseudoVRELOAD2_M4:
5077 return std::make_pair(2u, 4u);
5078 case RISCV::PseudoVSPILL3_M1:
5079 case RISCV::PseudoVRELOAD3_M1:
5080 return std::make_pair(3u, 1u);
5081 case RISCV::PseudoVSPILL3_M2:
5082 case RISCV::PseudoVRELOAD3_M2:
5083 return std::make_pair(3u, 2u);
5084 case RISCV::PseudoVSPILL4_M1:
5085 case RISCV::PseudoVRELOAD4_M1:
5086 return std::make_pair(4u, 1u);
5087 case RISCV::PseudoVSPILL4_M2:
5088 case RISCV::PseudoVRELOAD4_M2:
5089 return std::make_pair(4u, 2u);
5090 case RISCV::PseudoVSPILL5_M1:
5091 case RISCV::PseudoVRELOAD5_M1:
5092 return std::make_pair(5u, 1u);
5093 case RISCV::PseudoVSPILL6_M1:
5094 case RISCV::PseudoVRELOAD6_M1:
5095 return std::make_pair(6u, 1u);
5096 case RISCV::PseudoVSPILL7_M1:
5097 case RISCV::PseudoVRELOAD7_M1:
5098 return std::make_pair(7u, 1u);
5099 case RISCV::PseudoVSPILL8_M1:
5100 case RISCV::PseudoVRELOAD8_M1:
5101 return std::make_pair(8u, 1u);
5106 int16_t MI1FrmOpIdx =
5107 RISCV::getNamedOperandIdx(MI1.
getOpcode(), RISCV::OpName::frm);
5108 int16_t MI2FrmOpIdx =
5109 RISCV::getNamedOperandIdx(MI2.
getOpcode(), RISCV::OpName::frm);
5110 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
5117std::optional<unsigned>
5121 return std::nullopt;
5124 case RISCV::VSLL_VX:
5125 case RISCV::VSRL_VX:
5126 case RISCV::VSRA_VX:
5128 case RISCV::VSSRL_VX:
5129 case RISCV::VSSRA_VX:
5131 case RISCV::VROL_VX:
5132 case RISCV::VROR_VX:
5137 case RISCV::VNSRL_WX:
5138 case RISCV::VNSRA_WX:
5140 case RISCV::VNCLIPU_WX:
5141 case RISCV::VNCLIP_WX:
5143 case RISCV::VWSLL_VX:
5148 case RISCV::VADD_VX:
5149 case RISCV::VSUB_VX:
5150 case RISCV::VRSUB_VX:
5152 case RISCV::VWADDU_VX:
5153 case RISCV::VWSUBU_VX:
5154 case RISCV::VWADD_VX:
5155 case RISCV::VWSUB_VX:
5156 case RISCV::VWADDU_WX:
5157 case RISCV::VWSUBU_WX:
5158 case RISCV::VWADD_WX:
5159 case RISCV::VWSUB_WX:
5161 case RISCV::VADC_VXM:
5162 case RISCV::VADC_VIM:
5163 case RISCV::VMADC_VXM:
5164 case RISCV::VMADC_VIM:
5165 case RISCV::VMADC_VX:
5166 case RISCV::VSBC_VXM:
5167 case RISCV::VMSBC_VXM:
5168 case RISCV::VMSBC_VX:
5170 case RISCV::VAND_VX:
5172 case RISCV::VXOR_VX:
5174 case RISCV::VMSEQ_VX:
5175 case RISCV::VMSNE_VX:
5176 case RISCV::VMSLTU_VX:
5177 case RISCV::VMSLT_VX:
5178 case RISCV::VMSLEU_VX:
5179 case RISCV::VMSLE_VX:
5180 case RISCV::VMSGTU_VX:
5181 case RISCV::VMSGT_VX:
5183 case RISCV::VMINU_VX:
5184 case RISCV::VMIN_VX:
5185 case RISCV::VMAXU_VX:
5186 case RISCV::VMAX_VX:
5188 case RISCV::VMUL_VX:
5189 case RISCV::VMULH_VX:
5190 case RISCV::VMULHU_VX:
5191 case RISCV::VMULHSU_VX:
5193 case RISCV::VDIVU_VX:
5194 case RISCV::VDIV_VX:
5195 case RISCV::VREMU_VX:
5196 case RISCV::VREM_VX:
5198 case RISCV::VWMUL_VX:
5199 case RISCV::VWMULU_VX:
5200 case RISCV::VWMULSU_VX:
5202 case RISCV::VMACC_VX:
5203 case RISCV::VNMSAC_VX:
5204 case RISCV::VMADD_VX:
5205 case RISCV::VNMSUB_VX:
5207 case RISCV::VWMACCU_VX:
5208 case RISCV::VWMACC_VX:
5209 case RISCV::VWMACCSU_VX:
5210 case RISCV::VWMACCUS_VX:
5212 case RISCV::VMERGE_VXM:
5214 case RISCV::VMV_V_X:
5216 case RISCV::VSADDU_VX:
5217 case RISCV::VSADD_VX:
5218 case RISCV::VSSUBU_VX:
5219 case RISCV::VSSUB_VX:
5221 case RISCV::VAADDU_VX:
5222 case RISCV::VAADD_VX:
5223 case RISCV::VASUBU_VX:
5224 case RISCV::VASUB_VX:
5226 case RISCV::VSMUL_VX:
5228 case RISCV::VMV_S_X:
5230 case RISCV::VANDN_VX:
5231 return 1U << Log2SEW;
5237 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
5240 return RVV->BaseInstr;
5250 unsigned Scaled = Log2SEW + (DestEEW - 1);
5264 return std::nullopt;
5269 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
5270 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
5271 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
5272 LHS.getReg() == RHS.getReg())
5276 if (LHS.isImm() && LHS.getImm() == 0)
5282 if (!LHSImm || !RHSImm)
5284 return LHSImm <= RHSImm;
5296 : LHS(LHS), RHS(RHS),
Cond(
Cond.begin(),
Cond.end()) {}
5298 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5308 std::optional<bool> createTripCountGreaterCondition(
5309 int TC, MachineBasicBlock &
MBB,
5310 SmallVectorImpl<MachineOperand> &CondParam)
override {
5318 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
5320 void adjustTripCount(
int TripCountAdjust)
override {}
5324std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5332 if (
TBB == LoopBB && FBB == LoopBB)
5339 assert((
TBB == LoopBB || FBB == LoopBB) &&
5340 "The Loop must be a single-basic-block loop");
5351 if (!Reg.isVirtual())
5358 if (LHS && LHS->isPHI())
5360 if (RHS && RHS->isPHI())
5363 return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS,
Cond);
5369 Opc = RVVMCOpcode ? RVVMCOpcode :
Opc;
5386 case RISCV::FDIV_H_INX:
5387 case RISCV::FDIV_S_INX:
5388 case RISCV::FDIV_D_INX:
5389 case RISCV::FDIV_D_IN32X:
5390 case RISCV::FSQRT_H:
5391 case RISCV::FSQRT_S:
5392 case RISCV::FSQRT_D:
5393 case RISCV::FSQRT_H_INX:
5394 case RISCV::FSQRT_S_INX:
5395 case RISCV::FSQRT_D_INX:
5396 case RISCV::FSQRT_D_IN32X:
5398 case RISCV::VDIV_VV:
5399 case RISCV::VDIV_VX:
5400 case RISCV::VDIVU_VV:
5401 case RISCV::VDIVU_VX:
5402 case RISCV::VREM_VV:
5403 case RISCV::VREM_VX:
5404 case RISCV::VREMU_VV:
5405 case RISCV::VREMU_VX:
5407 case RISCV::VFDIV_VV:
5408 case RISCV::VFDIV_VF:
5409 case RISCV::VFRDIV_VF:
5410 case RISCV::VFSQRT_V:
5411 case RISCV::VFRSQRT7_V:
5417 if (
MI->getOpcode() != TargetOpcode::COPY)
5422 Register DstReg =
MI->getOperand(0).getReg();
5425 :
TRI->getMinimalPhysRegClass(DstReg);
5435 auto [RCLMul, RCFractional] =
5437 return (!RCFractional && LMul == RCLMul) || (RCFractional && LMul == 1);
5441 if (
MI.memoperands_empty())
5456 if (MO.getReg().isPhysical())
5459 if (MO.getReg().isPhysical())
5461 bool SawStore =
false;
5464 if (
II->definesRegister(PhysReg,
nullptr))
5467 if (
II->definesRegister(PhysReg,
nullptr) ||
5468 II->readsRegister(PhysReg,
nullptr))
5470 if (
II->mayStore()) {
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)
#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)
#define CASE_OPERAND_SIMM(NUM)
static std::optional< unsigned > getLMULForRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
std::optional< unsigned > getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, const RISCVSubtarget &ST)
#define RVV_OPC_LMUL_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define OPCODE_LMUL_MASK_CASE(OPC)
#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
static std::optional< int64_t > getEffectiveImm(const MachineOperand &MO)
#define CASE_VFMA_OPCODE_VV(OP)
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
static unsigned getLoadPredicatedOpcode(unsigned Opcode)
static unsigned getSHXADDUWShiftAmount(unsigned Opc)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVVType::VLMUL LMul)
static bool isFMUL(unsigned Opc)
static unsigned getInverseXqcicmOpcode(unsigned Opcode)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
static bool isFADD(unsigned Opc)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm)
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII, const RISCVSubtarget &STI)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
Attempts to merge LocA and LocB into a single location; see DebugLoc::getMergedLocation for more deta...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBundleIterator< const MachineInstr, true > const_reverse_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
LLVM_ABI void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
static bool isPairableLdStInstOpc(unsigned Opc)
Return true if pairing the given load or store may be paired with another.
RISCVInstrInfo(const RISCVSubtarget &STI)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
static bool isLdStSafeToPair(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
bool isReMaterializableImpl(const MachineInstr &MI) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool isVRegCopy(const MachineInstr *MI, unsigned LMul=0) const
Return true if MI is a COPY to a vector register of a specific LMul, or any kind of vector registers ...
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static bool isSafeToMove(const MachineInstr &From, const MachineInstr &To)
Return true if moving From down to To won't cause any physical register reads or writes to be clobber...
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool simplifyInstruction(MachineInstr &MI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool analyzeCandidate(outliner::Candidate &C) const
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool requiresNTLHint(const MachineInstr &MI) const
Return true if the instruction requires an NTL hint to be emitted.
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool isHighLatencyDef(int Opc) const override
static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)
Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)
Return true if the operand is a load immediate instruction and sets Imm to the immediate value.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
const RISCVRegisterInfo * getRegisterInfo() const override
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
self_iterator getIterator()
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getInverseBranchCondition(CondCode)
unsigned getInverseBranchOpcode(unsigned BCC)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static int getFRMOpNum(const MCInstrDesc &Desc)
static int getVXRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_ATOMIC_ORDERING
static unsigned getNF(uint8_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
static bool isTailAgnostic(unsigned VType)
LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)
static bool isValidXSfmmVType(unsigned VTypeI)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
static constexpr int64_t VLMaxSentinel
bool isVectorCopy(const TargetRegisterInfo *TRI, const MachineInstr &MI)
Return true if MI is a copy that will be lowered to one or more vmvNr.vs.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
bool isValidAtomicOrdering(Int I)
constexpr RegState getKillRegState(bool B)
static const MachineMemOperand::Flags MONontemporalBit0
constexpr RegState getDeadRegState(bool B)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr RegState getRenamableRegState(bool B)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr RegState getDefRegState(bool B)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
int isShifted359(T Value, int &Shift)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N-bit number shifted left by S.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
constexpr bool isShiftedUInt(uint64_t x)
Checks if an unsigned integer is an N-bit number shifted left by S.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool isRVVRegClass(const TargetRegisterClass *RC)
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.