LLVM 23.0.0git
SIInstrInfo.cpp
Go to the documentation of this file.
1//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// SI Implementation of TargetInstrInfo.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SIInstrInfo.h"
15#include "AMDGPU.h"
16#include "AMDGPUInstrInfo.h"
17#include "AMDGPULaneMaskUtils.h"
18#include "GCNHazardRecognizer.h"
19#include "GCNSubtarget.h"
22#include "llvm/ADT/STLExtras.h"
33#include "llvm/IR/IntrinsicsAMDGPU.h"
34#include "llvm/MC/MCContext.h"
37
38using namespace llvm;
39
40#define DEBUG_TYPE "si-instr-info"
41
42#define GET_INSTRINFO_CTOR_DTOR
43#include "AMDGPUGenInstrInfo.inc"
44
45namespace llvm::AMDGPU {
46#define GET_D16ImageDimIntrinsics_IMPL
47#define GET_ImageDimIntrinsicTable_IMPL
48#define GET_RsrcIntrinsics_IMPL
49#include "AMDGPUGenSearchableTables.inc"
50} // namespace llvm::AMDGPU
51
52// Must be at least 4 to be able to branch over minimum unconditional branch
53// code. This is only for making it possible to write reasonably small tests for
54// long branches.
56BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
57 cl::desc("Restrict range of branch instructions (DEBUG)"));
58
60 "amdgpu-fix-16-bit-physreg-copies",
61 cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
62 cl::init(true),
64
66 : AMDGPUGenInstrInfo(ST, RI, AMDGPU::ADJCALLSTACKUP,
67 AMDGPU::ADJCALLSTACKDOWN),
68 RI(ST), ST(ST) {
69 SchedModel.init(&ST);
70}
71
72//===----------------------------------------------------------------------===//
73// TargetInstrInfo callbacks
74//===----------------------------------------------------------------------===//
75
76static unsigned getNumOperandsNoGlue(SDNode *Node) {
77 unsigned N = Node->getNumOperands();
78 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
79 --N;
80 return N;
81}
82
83/// Returns true if both nodes have the same value for the given
84/// operand \p Op, or if both nodes do not have this operand.
86 AMDGPU::OpName OpName) {
87 unsigned Opc0 = N0->getMachineOpcode();
88 unsigned Opc1 = N1->getMachineOpcode();
89
90 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
91 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
92
93 if (Op0Idx == -1 && Op1Idx == -1)
94 return true;
95
96
97 if ((Op0Idx == -1 && Op1Idx != -1) ||
98 (Op1Idx == -1 && Op0Idx != -1))
99 return false;
100
101 // getNamedOperandIdx returns the index for the MachineInstr's operands,
102 // which includes the result as the first operand. We are indexing into the
103 // MachineSDNode's operands, so we need to skip the result operand to get
104 // the real index.
105 --Op0Idx;
106 --Op1Idx;
107
108 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
109}
110
111static bool canRemat(const MachineInstr &MI) {
112
116 return true;
117
118 if (SIInstrInfo::isSMRD(MI)) {
119 return !MI.memoperands_empty() &&
120 llvm::all_of(MI.memoperands(), [](const MachineMemOperand *MMO) {
121 return MMO->isLoad() && MMO->isInvariant();
122 });
123 }
124
125 return false;
126}
127
129 const MachineInstr &MI) const {
130
131 if (canRemat(MI)) {
132 // Normally VALU use of exec would block the rematerialization, but that
133 // is OK in this case to have an implicit exec read as all VALU do.
134 // We really want all of the generic logic for this except for this.
135
136 // Another potential implicit use is mode register. The core logic of
137 // the RA will not attempt rematerialization if mode is set anywhere
138 // in the function, otherwise it is safe since mode is not changed.
139
140 // There is difference to generic method which does not allow
141 // rematerialization if there are virtual register uses. We allow this,
142 // therefore this method includes SOP instructions as well.
143 if (!MI.hasImplicitDef() &&
144 MI.getNumImplicitOperands() == MI.getDesc().implicit_uses().size() &&
145 !MI.mayRaiseFPException())
146 return true;
147 }
148
150}
151
152// Returns true if the scalar result of a VALU instruction depends on exec.
153bool SIInstrInfo::resultDependsOnExec(const MachineInstr &MI) const {
154 // Ignore comparisons which are only used masked with exec.
155 // This allows some hoisting/sinking of VALU comparisons.
156 if (MI.isCompare()) {
157 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::sdst);
158 if (!Dst)
159 return true;
160
161 Register DstReg = Dst->getReg();
162 if (!DstReg.isVirtual())
163 return true;
164
165 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
166 for (MachineInstr &Use : MRI.use_nodbg_instructions(DstReg)) {
167 switch (Use.getOpcode()) {
168 case AMDGPU::S_AND_SAVEEXEC_B32:
169 case AMDGPU::S_AND_SAVEEXEC_B64:
170 break;
171 case AMDGPU::S_AND_B32:
172 case AMDGPU::S_AND_B64:
173 if (!Use.readsRegister(AMDGPU::EXEC, /*TRI=*/nullptr))
174 return true;
175 break;
176 default:
177 return true;
178 }
179 }
180 return false;
181 }
182
183 // If it is not convergent it does not depend on EXEC.
184 if (!MI.isConvergent())
185 return false;
186
187 switch (MI.getOpcode()) {
188 default:
189 break;
190 case AMDGPU::V_READFIRSTLANE_B32:
191 return true;
192 }
193
194 return false;
195}
196
198 // Any implicit use of exec by VALU is not a real register read.
199 return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() &&
200 isVALU(*MO.getParent()) && !resultDependsOnExec(*MO.getParent());
201}
202
204 MachineBasicBlock *SuccToSinkTo,
205 MachineCycleInfo *CI) const {
206 // Allow sinking if MI edits lane mask (divergent i1 in sgpr).
207 if (MI.getOpcode() == AMDGPU::SI_IF_BREAK)
208 return true;
209
210 MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
211 // Check if sinking of MI would create temporal divergent use.
212 for (auto Op : MI.uses()) {
213 if (Op.isReg() && Op.getReg().isVirtual() &&
214 RI.isSGPRClass(MRI.getRegClass(Op.getReg()))) {
215 MachineInstr *SgprDef = MRI.getVRegDef(Op.getReg());
216
217 // SgprDef defined inside cycle
218 MachineCycle *FromCycle = CI->getCycle(SgprDef->getParent());
219 if (FromCycle == nullptr)
220 continue;
221
222 MachineCycle *ToCycle = CI->getCycle(SuccToSinkTo);
223 // Check if there is a FromCycle that contains SgprDef's basic block but
224 // does not contain SuccToSinkTo and also has divergent exit condition.
225 while (FromCycle && !FromCycle->contains(ToCycle)) {
227 FromCycle->getExitingBlocks(ExitingBlocks);
228
229 // FromCycle has divergent exit condition.
230 for (MachineBasicBlock *ExitingBlock : ExitingBlocks) {
231 if (hasDivergentBranch(ExitingBlock))
232 return false;
233 }
234
235 FromCycle = FromCycle->getParentCycle();
236 }
237 }
238 }
239
240 return true;
241}
242
244 int64_t &Offset0,
245 int64_t &Offset1) const {
246 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
247 return false;
248
249 unsigned Opc0 = Load0->getMachineOpcode();
250 unsigned Opc1 = Load1->getMachineOpcode();
251
252 // Make sure both are actually loads.
253 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
254 return false;
255
256 // A mayLoad instruction without a def is not a load. Likely a prefetch.
257 if (!get(Opc0).getNumDefs() || !get(Opc1).getNumDefs())
258 return false;
259
260 if (isDS(Opc0) && isDS(Opc1)) {
261
262 // FIXME: Handle this case:
263 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
264 return false;
265
266 // Check base reg.
267 if (Load0->getOperand(0) != Load1->getOperand(0))
268 return false;
269
270 // Skip read2 / write2 variants for simplicity.
271 // TODO: We should report true if the used offsets are adjacent (excluded
272 // st64 versions).
273 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
274 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
275 if (Offset0Idx == -1 || Offset1Idx == -1)
276 return false;
277
278 // XXX - be careful of dataless loads
279 // getNamedOperandIdx returns the index for MachineInstrs. Since they
280 // include the output in the operand list, but SDNodes don't, we need to
281 // subtract the index by one.
282 Offset0Idx -= get(Opc0).NumDefs;
283 Offset1Idx -= get(Opc1).NumDefs;
284 Offset0 = Load0->getConstantOperandVal(Offset0Idx);
285 Offset1 = Load1->getConstantOperandVal(Offset1Idx);
286 return true;
287 }
288
289 if (isSMRD(Opc0) && isSMRD(Opc1)) {
290 // Skip time and cache invalidation instructions.
291 if (!AMDGPU::hasNamedOperand(Opc0, AMDGPU::OpName::sbase) ||
292 !AMDGPU::hasNamedOperand(Opc1, AMDGPU::OpName::sbase))
293 return false;
294
295 unsigned NumOps = getNumOperandsNoGlue(Load0);
296 if (NumOps != getNumOperandsNoGlue(Load1))
297 return false;
298
299 // Check base reg.
300 if (Load0->getOperand(0) != Load1->getOperand(0))
301 return false;
302
303 // Match register offsets, if both register and immediate offsets present.
304 assert(NumOps == 4 || NumOps == 5);
305 if (NumOps == 5 && Load0->getOperand(1) != Load1->getOperand(1))
306 return false;
307
308 const ConstantSDNode *Load0Offset =
310 const ConstantSDNode *Load1Offset =
312
313 if (!Load0Offset || !Load1Offset)
314 return false;
315
316 Offset0 = Load0Offset->getZExtValue();
317 Offset1 = Load1Offset->getZExtValue();
318 return true;
319 }
320
321 // MUBUF and MTBUF can access the same addresses.
322 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
323
324 // MUBUF and MTBUF have vaddr at different indices.
325 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
326 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
327 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
328 return false;
329
330 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
331 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
332
333 if (OffIdx0 == -1 || OffIdx1 == -1)
334 return false;
335
336 // getNamedOperandIdx returns the index for MachineInstrs. Since they
337 // include the output in the operand list, but SDNodes don't, we need to
338 // subtract the index by one.
339 OffIdx0 -= get(Opc0).NumDefs;
340 OffIdx1 -= get(Opc1).NumDefs;
341
342 SDValue Off0 = Load0->getOperand(OffIdx0);
343 SDValue Off1 = Load1->getOperand(OffIdx1);
344
345 // The offset might be a FrameIndexSDNode.
346 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
347 return false;
348
349 Offset0 = Off0->getAsZExtVal();
350 Offset1 = Off1->getAsZExtVal();
351 return true;
352 }
353
354 return false;
355}
356
357static bool isStride64(unsigned Opc) {
358 switch (Opc) {
359 case AMDGPU::DS_READ2ST64_B32:
360 case AMDGPU::DS_READ2ST64_B64:
361 case AMDGPU::DS_WRITE2ST64_B32:
362 case AMDGPU::DS_WRITE2ST64_B64:
363 return true;
364 default:
365 return false;
366 }
367}
368
371 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
372 const TargetRegisterInfo *TRI) const {
373 if (!LdSt.mayLoadOrStore())
374 return false;
375
376 unsigned Opc = LdSt.getOpcode();
377 OffsetIsScalable = false;
378 const MachineOperand *BaseOp, *OffsetOp;
379 int DataOpIdx;
380
381 if (isDS(LdSt)) {
382 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
383 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
384 if (OffsetOp) {
385 // Normal, single offset LDS instruction.
386 if (!BaseOp) {
387 // DS_CONSUME/DS_APPEND use M0 for the base address.
388 // TODO: find the implicit use operand for M0 and use that as BaseOp?
389 return false;
390 }
391 BaseOps.push_back(BaseOp);
392 Offset = OffsetOp->getImm();
393 // Get appropriate operand, and compute width accordingly.
394 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
395 if (DataOpIdx == -1)
396 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
397 if (Opc == AMDGPU::DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64)
398 Width = LocationSize::precise(64);
399 else
400 Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
401 } else {
402 // The 2 offset instructions use offset0 and offset1 instead. We can treat
403 // these as a load with a single offset if the 2 offsets are consecutive.
404 // We will use this for some partially aligned loads.
405 const MachineOperand *Offset0Op =
406 getNamedOperand(LdSt, AMDGPU::OpName::offset0);
407 const MachineOperand *Offset1Op =
408 getNamedOperand(LdSt, AMDGPU::OpName::offset1);
409
410 unsigned Offset0 = Offset0Op->getImm() & 0xff;
411 unsigned Offset1 = Offset1Op->getImm() & 0xff;
412 if (Offset0 + 1 != Offset1)
413 return false;
414
415 // Each of these offsets is in element sized units, so we need to convert
416 // to bytes of the individual reads.
417
418 unsigned EltSize;
419 if (LdSt.mayLoad())
420 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
421 else {
422 assert(LdSt.mayStore());
423 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
424 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
425 }
426
427 if (isStride64(Opc))
428 EltSize *= 64;
429
430 BaseOps.push_back(BaseOp);
431 Offset = EltSize * Offset0;
432 // Get appropriate operand(s), and compute width accordingly.
433 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
434 if (DataOpIdx == -1) {
435 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
436 Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
437 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
438 Width = LocationSize::precise(
439 Width.getValue() + TypeSize::getFixed(getOpSize(LdSt, DataOpIdx)));
440 } else {
441 Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
442 }
443 }
444 return true;
445 }
446
447 if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
448 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
449 if (!RSrc) // e.g. BUFFER_WBINVL1_VOL
450 return false;
451 BaseOps.push_back(RSrc);
452 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
453 if (BaseOp && !BaseOp->isFI())
454 BaseOps.push_back(BaseOp);
455 const MachineOperand *OffsetImm =
456 getNamedOperand(LdSt, AMDGPU::OpName::offset);
457 Offset = OffsetImm->getImm();
458 const MachineOperand *SOffset =
459 getNamedOperand(LdSt, AMDGPU::OpName::soffset);
460 if (SOffset) {
461 if (SOffset->isReg())
462 BaseOps.push_back(SOffset);
463 else
464 Offset += SOffset->getImm();
465 }
466 // Get appropriate operand, and compute width accordingly.
467 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
468 if (DataOpIdx == -1)
469 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
470 if (DataOpIdx == -1) // LDS DMA
471 return false;
472 Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
473 return true;
474 }
475
476 if (isImage(LdSt)) {
477 auto RsrcOpName =
478 isMIMG(LdSt) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
479 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RsrcOpName);
480 BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
481 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
482 if (VAddr0Idx >= 0) {
483 // GFX10 possible NSA encoding.
484 for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
485 BaseOps.push_back(&LdSt.getOperand(I));
486 } else {
487 BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
488 }
489 Offset = 0;
490 // Get appropriate operand, and compute width accordingly.
491 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
492 if (DataOpIdx == -1)
493 return false; // no return sampler
494 Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
495 return true;
496 }
497
498 if (isSMRD(LdSt)) {
499 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
500 if (!BaseOp) // e.g. S_MEMTIME
501 return false;
502 BaseOps.push_back(BaseOp);
503 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
504 Offset = OffsetOp ? OffsetOp->getImm() : 0;
505 // Get appropriate operand, and compute width accordingly.
506 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
507 if (DataOpIdx == -1)
508 return false;
509 Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
510 return true;
511 }
512
513 if (isFLAT(LdSt)) {
514 // Instructions have either vaddr or saddr or both or none.
515 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
516 if (BaseOp)
517 BaseOps.push_back(BaseOp);
518 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
519 if (BaseOp)
520 BaseOps.push_back(BaseOp);
521 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
522 // Get appropriate operand, and compute width accordingly.
523 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
524 if (DataOpIdx == -1)
525 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
526 if (DataOpIdx == -1) // LDS DMA
527 return false;
528 Width = LocationSize::precise(getOpSize(LdSt, DataOpIdx));
529 return true;
530 }
531
532 return false;
533}
534
535static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
537 const MachineInstr &MI2,
539 // Only examine the first "base" operand of each instruction, on the
540 // assumption that it represents the real base address of the memory access.
541 // Other operands are typically offsets or indices from this base address.
542 if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
543 return true;
544
545 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
546 return false;
547
548 auto *MO1 = *MI1.memoperands_begin();
549 auto *MO2 = *MI2.memoperands_begin();
550 if (MO1->getAddrSpace() != MO2->getAddrSpace())
551 return false;
552
553 const auto *Base1 = MO1->getValue();
554 const auto *Base2 = MO2->getValue();
555 if (!Base1 || !Base2)
556 return false;
557 Base1 = getUnderlyingObject(Base1);
558 Base2 = getUnderlyingObject(Base2);
559
560 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
561 return false;
562
563 return Base1 == Base2;
564}
565
567 int64_t Offset1, bool OffsetIsScalable1,
569 int64_t Offset2, bool OffsetIsScalable2,
570 unsigned ClusterSize,
571 unsigned NumBytes) const {
572 // If the mem ops (to be clustered) do not have the same base ptr, then they
573 // should not be clustered
574 unsigned MaxMemoryClusterDWords = DefaultMemoryClusterDWordsLimit;
575 if (!BaseOps1.empty() && !BaseOps2.empty()) {
576 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
577 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
578 if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
579 return false;
580
581 const SIMachineFunctionInfo *MFI =
582 FirstLdSt.getMF()->getInfo<SIMachineFunctionInfo>();
583 MaxMemoryClusterDWords = MFI->getMaxMemoryClusterDWords();
584 } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
585 // If only one base op is empty, they do not have the same base ptr
586 return false;
587 }
588
589 // In order to avoid register pressure, on an average, the number of DWORDS
590 // loaded together by all clustered mem ops should not exceed
591 // MaxMemoryClusterDWords. This is an empirical value based on certain
592 // observations and performance related experiments.
593 // The good thing about this heuristic is - it avoids clustering of too many
594 // sub-word loads, and also avoids clustering of wide loads. Below is the
595 // brief summary of how the heuristic behaves for various `LoadSize` when
596 // MaxMemoryClusterDWords is 8.
597 //
598 // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
599 // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
600 // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
601 // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
602 // (5) LoadSize >= 17: do not cluster
603 const unsigned LoadSize = NumBytes / ClusterSize;
604 const unsigned NumDWords = ((LoadSize + 3) / 4) * ClusterSize;
605 return NumDWords <= MaxMemoryClusterDWords;
606}
607
608// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
609// the first 16 loads will be interleaved with the stores, and the next 16 will
610// be clustered as expected. It should really split into 2 16 store batches.
611//
612// Loads are clustered until this returns false, rather than trying to schedule
613// groups of stores. This also means we have to deal with saying different
614// address space loads should be clustered, and ones which might cause bank
615// conflicts.
616//
617// This might be deprecated so it might not be worth that much effort to fix.
619 int64_t Offset0, int64_t Offset1,
620 unsigned NumLoads) const {
621 assert(Offset1 > Offset0 &&
622 "Second offset should be larger than first offset!");
623 // If we have less than 16 loads in a row, and the offsets are within 64
624 // bytes, then schedule together.
625
626 // A cacheline is 64 bytes (for global memory).
627 return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
628}
629
632 const DebugLoc &DL, MCRegister DestReg,
633 MCRegister SrcReg, bool KillSrc,
634 const char *Msg = "illegal VGPR to SGPR copy") {
635 MachineFunction *MF = MBB.getParent();
636
638 C.diagnose(DiagnosticInfoUnsupported(MF->getFunction(), Msg, DL, DS_Error));
639
640 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
641 .addReg(SrcReg, getKillRegState(KillSrc));
642}
643
644/// Handle copying from SGPR to AGPR, or from AGPR to AGPR on GFX908. It is not
645/// possible to have a direct copy in these cases on GFX908, so an intermediate
646/// VGPR copy is required.
650 const DebugLoc &DL, MCRegister DestReg,
651 MCRegister SrcReg, bool KillSrc,
652 RegScavenger &RS, bool RegsOverlap,
653 Register ImpDefSuperReg = Register(),
654 Register ImpUseSuperReg = Register()) {
655 assert((TII.getSubtarget().hasMAIInsts() &&
656 !TII.getSubtarget().hasGFX90AInsts()) &&
657 "Expected GFX908 subtarget.");
658
659 assert((AMDGPU::SReg_32RegClass.contains(SrcReg) ||
660 AMDGPU::AGPR_32RegClass.contains(SrcReg)) &&
661 "Source register of the copy should be either an SGPR or an AGPR.");
662
663 assert(AMDGPU::AGPR_32RegClass.contains(DestReg) &&
664 "Destination register of the copy should be an AGPR.");
665
666 const SIRegisterInfo &RI = TII.getRegisterInfo();
667
668 // First try to find defining accvgpr_write to avoid temporary registers.
669 // In the case of copies of overlapping AGPRs, we conservatively do not
670 // reuse previous accvgpr_writes. Otherwise, we may incorrectly pick up
671 // an accvgpr_write used for this same copy due to implicit-defs
672 if (!RegsOverlap) {
673 for (auto Def = MI, E = MBB.begin(); Def != E; ) {
674 --Def;
675
676 if (!Def->modifiesRegister(SrcReg, &RI))
677 continue;
678
679 if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
680 Def->getOperand(0).getReg() != SrcReg)
681 break;
682
683 MachineOperand &DefOp = Def->getOperand(1);
684 assert(DefOp.isReg() || DefOp.isImm());
685
686 if (DefOp.isReg()) {
687 bool SafeToPropagate = true;
688 // Check that register source operand is not clobbered before MI.
689 // Immediate operands are always safe to propagate.
690 for (auto I = Def; I != MI && SafeToPropagate; ++I)
691 if (I->modifiesRegister(DefOp.getReg(), &RI))
692 SafeToPropagate = false;
693
694 if (!SafeToPropagate)
695 break;
696
697 for (auto I = Def; I != MI; ++I)
698 I->clearRegisterKills(DefOp.getReg(), &RI);
699 }
700
701 MachineInstrBuilder Builder =
702 BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
703 .add(DefOp);
704 if (ImpDefSuperReg)
705 Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
706
707 if (ImpUseSuperReg) {
708 Builder.addReg(ImpUseSuperReg,
710 }
711
712 return;
713 }
714 }
715
716 RS.enterBasicBlockEnd(MBB);
717 RS.backward(std::next(MI));
718
719 // Ideally we want to have three registers for a long reg_sequence copy
720 // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
721 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
722 *MBB.getParent());
723
724 // Registers in the sequence are allocated contiguously so we can just
725 // use register number to pick one of three round-robin temps.
726 unsigned RegNo = (DestReg - AMDGPU::AGPR0) % 3;
727 Register Tmp =
728 MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getVGPRForAGPRCopy();
729 assert(MBB.getParent()->getRegInfo().isReserved(Tmp) &&
730 "VGPR used for an intermediate copy should have been reserved.");
731
732 // Only loop through if there are any free registers left. We don't want to
733 // spill.
734 while (RegNo--) {
735 Register Tmp2 = RS.scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, MI,
736 /* RestoreAfter */ false, 0,
737 /* AllowSpill */ false);
738 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
739 break;
740 Tmp = Tmp2;
741 RS.setRegUsed(Tmp);
742 }
743
744 // Insert copy to temporary VGPR.
745 unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
746 if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
747 TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;
748 } else {
749 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
750 }
751
752 MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
753 .addReg(SrcReg, getKillRegState(KillSrc));
754 if (ImpUseSuperReg) {
755 UseBuilder.addReg(ImpUseSuperReg,
757 }
758
759 MachineInstrBuilder DefBuilder
760 = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
761 .addReg(Tmp, RegState::Kill);
762
763 if (ImpDefSuperReg)
764 DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
765}
766
769 MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
770 const TargetRegisterClass *RC, bool Forward) {
771 const SIRegisterInfo &RI = TII.getRegisterInfo();
772 ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
774 MachineInstr *FirstMI = nullptr, *LastMI = nullptr;
775
776 for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
777 int16_t SubIdx = BaseIndices[Idx];
778 Register DestSubReg = RI.getSubReg(DestReg, SubIdx);
779 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
780 assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
781 unsigned Opcode = AMDGPU::S_MOV_B32;
782
783 // Is SGPR aligned? If so try to combine with next.
784 bool AlignedDest = ((DestSubReg - AMDGPU::SGPR0) % 2) == 0;
785 bool AlignedSrc = ((SrcSubReg - AMDGPU::SGPR0) % 2) == 0;
786 if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
787 // Can use SGPR64 copy
788 unsigned Channel = RI.getChannelFromSubReg(SubIdx);
789 SubIdx = RI.getSubRegFromChannel(Channel, 2);
790 DestSubReg = RI.getSubReg(DestReg, SubIdx);
791 SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
792 assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
793 Opcode = AMDGPU::S_MOV_B64;
794 Idx++;
795 }
796
797 LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), DestSubReg)
798 .addReg(SrcSubReg)
799 .addReg(SrcReg, RegState::Implicit);
800
801 if (!FirstMI)
802 FirstMI = LastMI;
803
804 if (!Forward)
805 I--;
806 }
807
808 assert(FirstMI && LastMI);
809 if (!Forward)
810 std::swap(FirstMI, LastMI);
811
812 FirstMI->addOperand(
813 MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));
814
815 if (KillSrc)
816 LastMI->addRegisterKilled(SrcReg, &RI);
817}
818
821 const DebugLoc &DL, Register DestReg,
822 Register SrcReg, bool KillSrc, bool RenamableDest,
823 bool RenamableSrc) const {
824 const TargetRegisterClass *RC = RI.getPhysRegBaseClass(DestReg);
825 unsigned Size = RI.getRegSizeInBits(*RC);
826 const TargetRegisterClass *SrcRC = RI.getPhysRegBaseClass(SrcReg);
827 unsigned SrcSize = RI.getRegSizeInBits(*SrcRC);
828
829 // The rest of copyPhysReg assumes Src and Dst size are the same size.
830 // TODO-GFX11_16BIT If all true 16 bit instruction patterns are completed can
831 // we remove Fix16BitCopies and this code block?
832 if (Fix16BitCopies) {
833 if (((Size == 16) != (SrcSize == 16))) {
834 // Non-VGPR Src and Dst will later be expanded back to 32 bits.
835 assert(ST.useRealTrue16Insts());
836 Register &RegToFix = (Size == 32) ? DestReg : SrcReg;
837 MCRegister SubReg = RI.getSubReg(RegToFix, AMDGPU::lo16);
838 RegToFix = SubReg;
839
840 if (DestReg == SrcReg) {
841 // Identity copy. Insert empty bundle since ExpandPostRA expects an
842 // instruction here.
843 BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
844 return;
845 }
846 RC = RI.getPhysRegBaseClass(DestReg);
847 Size = RI.getRegSizeInBits(*RC);
848 SrcRC = RI.getPhysRegBaseClass(SrcReg);
849 SrcSize = RI.getRegSizeInBits(*SrcRC);
850 }
851 }
852
853 if (RC == &AMDGPU::VGPR_32RegClass) {
854 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
855 AMDGPU::SReg_32RegClass.contains(SrcReg) ||
856 AMDGPU::AGPR_32RegClass.contains(SrcReg));
857 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
858 AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;
859 BuildMI(MBB, MI, DL, get(Opc), DestReg)
860 .addReg(SrcReg, getKillRegState(KillSrc));
861 return;
862 }
863
864 if (RC == &AMDGPU::SReg_32_XM0RegClass ||
865 RC == &AMDGPU::SReg_32RegClass) {
866 if (SrcReg == AMDGPU::SCC) {
867 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
868 .addImm(1)
869 .addImm(0);
870 return;
871 }
872
873 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
874 if (DestReg == AMDGPU::VCC_LO) {
875 // FIXME: Hack until VReg_1 removed.
876 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
877 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
878 .addImm(0)
879 .addReg(SrcReg, getKillRegState(KillSrc));
880 return;
881 }
882
883 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
884 return;
885 }
886
887 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
888 .addReg(SrcReg, getKillRegState(KillSrc));
889 return;
890 }
891
892 if (RC == &AMDGPU::SReg_64RegClass) {
893 if (SrcReg == AMDGPU::SCC) {
894 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
895 .addImm(1)
896 .addImm(0);
897 return;
898 }
899
900 if (!AMDGPU::SReg_64_EncodableRegClass.contains(SrcReg)) {
901 if (DestReg == AMDGPU::VCC) {
902 // FIXME: Hack until VReg_1 removed.
903 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
904 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
905 .addImm(0)
906 .addReg(SrcReg, getKillRegState(KillSrc));
907 return;
908 }
909
910 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
911 return;
912 }
913
914 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
915 .addReg(SrcReg, getKillRegState(KillSrc));
916 return;
917 }
918
919 if (DestReg == AMDGPU::SCC) {
920 // Copying 64-bit or 32-bit sources to SCC barely makes sense,
921 // but SelectionDAG emits such copies for i1 sources.
922 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
923 // This copy can only be produced by patterns
924 // with explicit SCC, which are known to be enabled
925 // only for subtargets with S_CMP_LG_U64 present.
926 assert(ST.hasScalarCompareEq64());
927 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
928 .addReg(SrcReg, getKillRegState(KillSrc))
929 .addImm(0);
930 } else {
931 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
932 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
933 .addReg(SrcReg, getKillRegState(KillSrc))
934 .addImm(0);
935 }
936
937 return;
938 }
939
940 if (RC == &AMDGPU::AGPR_32RegClass) {
941 if (AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
942 (ST.hasGFX90AInsts() && AMDGPU::SReg_32RegClass.contains(SrcReg))) {
943 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
944 .addReg(SrcReg, getKillRegState(KillSrc));
945 return;
946 }
947
948 if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) {
949 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg)
950 .addReg(SrcReg, getKillRegState(KillSrc));
951 return;
952 }
953
954 // FIXME: Pass should maintain scavenger to avoid scan through the block on
955 // every AGPR spill.
956 RegScavenger RS;
957 const bool Overlap = RI.regsOverlap(SrcReg, DestReg);
958 indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS, Overlap);
959 return;
960 }
961
962 if (Size == 16) {
963 assert(AMDGPU::VGPR_16RegClass.contains(SrcReg) ||
964 AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
965 AMDGPU::AGPR_LO16RegClass.contains(SrcReg));
966
967 bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
968 bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
969 bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
970 bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
971 bool DstLow = !AMDGPU::isHi16Reg(DestReg, RI);
972 bool SrcLow = !AMDGPU::isHi16Reg(SrcReg, RI);
973 MCRegister NewDestReg = RI.get32BitRegister(DestReg);
974 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);
975
976 if (IsSGPRDst) {
977 if (!IsSGPRSrc) {
978 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
979 return;
980 }
981
982 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
983 .addReg(NewSrcReg, getKillRegState(KillSrc));
984 return;
985 }
986
987 if (IsAGPRDst || IsAGPRSrc) {
988 if (!DstLow || !SrcLow) {
989 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
990 "Cannot use hi16 subreg with an AGPR!");
991 }
992
993 copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
994 return;
995 }
996
997 if (ST.useRealTrue16Insts()) {
998 if (IsSGPRSrc) {
999 assert(SrcLow);
1000 SrcReg = NewSrcReg;
1001 }
1002 // Use the smaller instruction encoding if possible.
1003 if (AMDGPU::VGPR_16_Lo128RegClass.contains(DestReg) &&
1004 (IsSGPRSrc || AMDGPU::VGPR_16_Lo128RegClass.contains(SrcReg))) {
1005 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B16_t16_e32), DestReg)
1006 .addReg(SrcReg);
1007 } else {
1008 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B16_t16_e64), DestReg)
1009 .addImm(0) // src0_modifiers
1010 .addReg(SrcReg)
1011 .addImm(0); // op_sel
1012 }
1013 return;
1014 }
1015
1016 if (IsSGPRSrc && !ST.hasSDWAScalar()) {
1017 if (!DstLow || !SrcLow) {
1018 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
1019 "Cannot use hi16 subreg on VI!");
1020 }
1021
1022 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
1023 .addReg(NewSrcReg, getKillRegState(KillSrc));
1024 return;
1025 }
1026
1027 auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
1028 .addImm(0) // src0_modifiers
1029 .addReg(NewSrcReg)
1030 .addImm(0) // clamp
1037 // First implicit operand is $exec.
1038 MIB->tieOperands(0, MIB->getNumOperands() - 1);
1039 return;
1040 }
1041
1042 if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) {
1043 if (ST.hasMovB64()) {
1044 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_e32), DestReg)
1045 .addReg(SrcReg, getKillRegState(KillSrc));
1046 return;
1047 }
1048 if (ST.hasPkMovB32()) {
1049 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg)
1051 .addReg(SrcReg)
1053 .addReg(SrcReg)
1054 .addImm(0) // op_sel_lo
1055 .addImm(0) // op_sel_hi
1056 .addImm(0) // neg_lo
1057 .addImm(0) // neg_hi
1058 .addImm(0) // clamp
1059 .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
1060 return;
1061 }
1062 }
1063
1064 const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
1065 if (RI.isSGPRClass(RC)) {
1066 if (!RI.isSGPRClass(SrcRC)) {
1067 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
1068 return;
1069 }
1070 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);
1071 expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, CanKillSuperReg, RC,
1072 Forward);
1073 return;
1074 }
1075
1076 unsigned EltSize = 4;
1077 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
1078 if (RI.isAGPRClass(RC)) {
1079 if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC))
1080 Opcode = AMDGPU::V_ACCVGPR_MOV_B32;
1081 else if (RI.hasVGPRs(SrcRC) ||
1082 (ST.hasGFX90AInsts() && RI.isSGPRClass(SrcRC)))
1083 Opcode = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
1084 else
1085 Opcode = AMDGPU::INSTRUCTION_LIST_END;
1086 } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) {
1087 Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
1088 } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) &&
1089 (RI.isProperlyAlignedRC(*RC) &&
1090 (SrcRC == RC || RI.isSGPRClass(SrcRC)))) {
1091 // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov.
1092 if (ST.hasMovB64()) {
1093 Opcode = AMDGPU::V_MOV_B64_e32;
1094 EltSize = 8;
1095 } else if (ST.hasPkMovB32()) {
1096 Opcode = AMDGPU::V_PK_MOV_B32;
1097 EltSize = 8;
1098 }
1099 }
1100
1101 // For the cases where we need an intermediate instruction/temporary register
1102 // (destination is an AGPR), we need a scavenger.
1103 //
1104 // FIXME: The pass should maintain this for us so we don't have to re-scan the
1105 // whole block for every handled copy.
1106 std::unique_ptr<RegScavenger> RS;
1107 if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
1108 RS = std::make_unique<RegScavenger>();
1109
1110 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
1111
1112 // If there is an overlap, we can't kill the super-register on the last
1113 // instruction, since it will also kill the components made live by this def.
1114 const bool Overlap = RI.regsOverlap(SrcReg, DestReg);
1115 const bool CanKillSuperReg = KillSrc && !Overlap;
1116
1117 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
1118 unsigned SubIdx;
1119 if (Forward)
1120 SubIdx = SubIndices[Idx];
1121 else
1122 SubIdx = SubIndices[SubIndices.size() - Idx - 1];
1123 Register DestSubReg = RI.getSubReg(DestReg, SubIdx);
1124 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
1125 assert(DestSubReg && SrcSubReg && "Failed to find subregs!");
1126
1127 bool IsFirstSubreg = Idx == 0;
1128 bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;
1129
1130 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
1131 Register ImpDefSuper = IsFirstSubreg ? Register(DestReg) : Register();
1132 Register ImpUseSuper = SrcReg;
1133 indirectCopyToAGPR(*this, MBB, MI, DL, DestSubReg, SrcSubReg, UseKill,
1134 *RS, Overlap, ImpDefSuper, ImpUseSuper);
1135 } else if (Opcode == AMDGPU::V_PK_MOV_B32) {
1137 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestSubReg)
1139 .addReg(SrcSubReg)
1141 .addReg(SrcSubReg)
1142 .addImm(0) // op_sel_lo
1143 .addImm(0) // op_sel_hi
1144 .addImm(0) // neg_lo
1145 .addImm(0) // neg_hi
1146 .addImm(0) // clamp
1147 .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
1148 if (IsFirstSubreg)
1150 } else {
1151 MachineInstrBuilder Builder =
1152 BuildMI(MBB, MI, DL, get(Opcode), DestSubReg).addReg(SrcSubReg);
1153 if (IsFirstSubreg)
1154 Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
1155
1156 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
1157 }
1158 }
1159}
1160
1161int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
1162 int32_t NewOpc;
1163
1164 // Try to map original to commuted opcode
1165 NewOpc = AMDGPU::getCommuteRev(Opcode);
1166 if (NewOpc != -1)
1167 // Check if the commuted (REV) opcode exists on the target.
1168 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
1169
1170 // Try to map commuted to original opcode
1171 NewOpc = AMDGPU::getCommuteOrig(Opcode);
1172 if (NewOpc != -1)
1173 // Check if the original (non-REV) opcode exists on the target.
1174 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
1175
1176 return Opcode;
1177}
1178
1179const TargetRegisterClass *
1181 return &AMDGPU::VGPR_32RegClass;
1182}
1183
// Emit a per-lane (VGPR) select of TrueReg/FalseReg into DstReg, driven by
// branch-condition operands in the format produced by analyzeBranch: either a
// single lane-mask register, or a {predicate-immediate, register} pair using
// the SIInstrInfo branch-predicate encodings (SCC_TRUE/SCC_FALSE/VCCNZ/...).
//
// NOTE(review): extraction dropped the lines opening this definition (the
// function name, the MBB/iterator parameters, and the
// ArrayRef<MachineOperand> Cond parameter) as well as the local declaration
// of `LMC` (lane-mask constants supplying CSelectOpc/OrSaveExecOpc for the
// current wave size) — confirm against upstream SIInstrInfo.cpp.
                                     const DebugLoc &DL, Register DstReg,
                                     Register TrueReg,
                                     Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *BoolXExecRC = RI.getWaveMaskRegClass();
  // Only 32-bit VGPR destinations are supported here.
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    // Condition is already a lane mask: copy it into a wave-mask register and
    // feed it straight to V_CNDMASK.
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      // Materialize SCC as a lane mask (all lanes set iff SCC) via S_CSELECT.
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(LMC.CSelectOpc), SReg).addImm(1).addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      // Same as SCC_TRUE with the select inputs inverted in the cselect.
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(LMC.CSelectOpc), SReg).addImm(0).addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      // The condition register (VCC-like) is used directly as the mask.
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
          .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      // Inverted sense: swap TrueReg/FalseReg instead of inverting the mask.
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
          .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(TrueReg)
          .addImm(0)
          .addReg(FalseReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      // OrSaveExec with 0 sets SCC to (EXEC != 0) without changing EXEC's
      // value; the cselect then turns SCC into a full lane mask.
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(LMC.OrSaveExecOpc), SReg2).addImm(0);
      BuildMI(MBB, I, DL, get(LMC.CSelectOpc), SReg).addImm(1).addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      // EXECZ is not expected to reach here (no lanes active means the select
      // result is irrelevant); kept for the predicate enumeration.
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(LMC.OrSaveExecOpc), SReg2).addImm(0);
      BuildMI(MBB, I, DL, get(LMC.CSelectOpc), SReg).addImm(0).addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addReg(FalseReg)
          .addImm(0)
          .addReg(TrueReg)
          .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}
1293
1296 const DebugLoc &DL,
1297 Register SrcReg, int Value) const {
1298 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1299 Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1300 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
1301 .addImm(Value)
1302 .addReg(SrcReg);
1303
1304 return Reg;
1305}
1306
1309 const DebugLoc &DL,
1310 Register SrcReg, int Value) const {
1311 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1312 Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1313 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
1314 .addImm(Value)
1315 .addReg(SrcReg);
1316
1317 return Reg;
1318}
1319
bool SIInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
                                          const Register Reg,
                                          int64_t &ImmVal) const {
  // NOTE(review): the line opening this signature was dropped by extraction
  // and has been reconstructed — confirm against upstream SIInstrInfo.cpp.
  //
  // If MI writes a compile-time constant into Reg, return true and store that
  // constant in ImmVal (undoing any bit transform the opcode applies).
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOVK_I32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B16_t16_e32: {
    // Plain moves: the constant, if any, is src0 (operand 1).
    const MachineOperand &Src0 = MI.getOperand(1);
    if (Src0.isImm()) {
      ImmVal = Src0.getImm();
      return MI.getOperand(0).getReg() == Reg;
    }

    return false;
  }
  case AMDGPU::V_MOV_B16_t16_e64: {
    // _e64 form: operand 1 is src0_modifiers; the value is only a plain
    // constant when no source modifier is applied.
    const MachineOperand &Src0 = MI.getOperand(2);
    if (Src0.isImm() && !MI.getOperand(1).getImm()) {
      ImmVal = Src0.getImm();
      return MI.getOperand(0).getReg() == Reg;
    }

    return false;
  }
  case AMDGPU::S_BREV_B32:
  case AMDGPU::V_BFREV_B32_e32:
  case AMDGPU::V_BFREV_B32_e64: {
    // Bit-reverse of an immediate source: fold the reversal into the value.
    const MachineOperand &Src0 = MI.getOperand(1);
    if (Src0.isImm()) {
      ImmVal = static_cast<int64_t>(reverseBits<int32_t>(Src0.getImm()));
      return MI.getOperand(0).getReg() == Reg;
    }

    return false;
  }
  case AMDGPU::S_NOT_B32:
  case AMDGPU::V_NOT_B32_e32:
  case AMDGPU::V_NOT_B32_e64: {
    // Bitwise-not of an immediate source: fold the complement into the value.
    const MachineOperand &Src0 = MI.getOperand(1);
    if (Src0.isImm()) {
      ImmVal = static_cast<int64_t>(~static_cast<int32_t>(Src0.getImm()));
      return MI.getOperand(0).getReg() == Reg;
    }

    return false;
  }
  default:
    return false;
  }
}
1378
std::optional<int64_t>
// NOTE(review): extraction dropped the line naming this member function and
// declaring its (const MachineOperand &Op) parameter — confirm the identifier
// against upstream SIInstrInfo.cpp.
//
// Returns the constant an operand evaluates to: either the operand's own
// immediate, or — for a virtual register — the immediate of its defining
// move instruction, sliced to the operand's sub-register index.
  if (Op.isImm())
    return Op.getImm();

  // Only virtual registers have a unique SSA definition we can chase.
  if (!Op.isReg() || !Op.getReg().isVirtual())
    return std::nullopt;
  MachineRegisterInfo &MRI = Op.getParent()->getMF()->getRegInfo();
  const MachineInstr *Def = MRI.getVRegDef(Op.getReg());
  if (Def && Def->isMoveImmediate()) {
    const MachineOperand &ImmSrc = Def->getOperand(1);
    if (ImmSrc.isImm())
      // Honor a sub-register read of the wider defined immediate.
      return extractSubregFromImm(ImmSrc.getImm(), Op.getSubReg());
  }

  return std::nullopt;
}
1396
1398
1399 if (RI.isAGPRClass(DstRC))
1400 return AMDGPU::COPY;
1401 if (RI.getRegSizeInBits(*DstRC) == 16) {
1402 // Assume hi bits are unneeded. Only _e64 true16 instructions are legal
1403 // before RA.
1404 return RI.isSGPRClass(DstRC) ? AMDGPU::COPY : AMDGPU::V_MOV_B16_t16_e64;
1405 }
1406 if (RI.getRegSizeInBits(*DstRC) == 32)
1407 return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1408 if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC))
1409 return AMDGPU::S_MOV_B64;
1410 if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC))
1411 return AMDGPU::V_MOV_B64_PSEUDO;
1412 return AMDGPU::COPY;
1413}
1414
1415const MCInstrDesc &
1417 bool IsIndirectSrc) const {
1418 if (IsIndirectSrc) {
1419 if (VecSize <= 32) // 4 bytes
1420 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
1421 if (VecSize <= 64) // 8 bytes
1422 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
1423 if (VecSize <= 96) // 12 bytes
1424 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
1425 if (VecSize <= 128) // 16 bytes
1426 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
1427 if (VecSize <= 160) // 20 bytes
1428 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
1429 if (VecSize <= 192) // 24 bytes
1430 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V6);
1431 if (VecSize <= 224) // 28 bytes
1432 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V7);
1433 if (VecSize <= 256) // 32 bytes
1434 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
1435 if (VecSize <= 288) // 36 bytes
1436 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V9);
1437 if (VecSize <= 320) // 40 bytes
1438 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V10);
1439 if (VecSize <= 352) // 44 bytes
1440 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V11);
1441 if (VecSize <= 384) // 48 bytes
1442 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V12);
1443 if (VecSize <= 512) // 64 bytes
1444 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
1445 if (VecSize <= 1024) // 128 bytes
1446 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);
1447
1448 llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos");
1449 }
1450
1451 if (VecSize <= 32) // 4 bytes
1452 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
1453 if (VecSize <= 64) // 8 bytes
1454 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
1455 if (VecSize <= 96) // 12 bytes
1456 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
1457 if (VecSize <= 128) // 16 bytes
1458 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
1459 if (VecSize <= 160) // 20 bytes
1460 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
1461 if (VecSize <= 192) // 24 bytes
1462 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V6);
1463 if (VecSize <= 224) // 28 bytes
1464 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V7);
1465 if (VecSize <= 256) // 32 bytes
1466 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
1467 if (VecSize <= 288) // 36 bytes
1468 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V9);
1469 if (VecSize <= 320) // 40 bytes
1470 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V10);
1471 if (VecSize <= 352) // 44 bytes
1472 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V11);
1473 if (VecSize <= 384) // 48 bytes
1474 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V12);
1475 if (VecSize <= 512) // 64 bytes
1476 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
1477 if (VecSize <= 1024) // 128 bytes
1478 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);
1479
1480 llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos");
1481}
1482
1483static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) {
1484 if (VecSize <= 32) // 4 bytes
1485 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
1486 if (VecSize <= 64) // 8 bytes
1487 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
1488 if (VecSize <= 96) // 12 bytes
1489 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
1490 if (VecSize <= 128) // 16 bytes
1491 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
1492 if (VecSize <= 160) // 20 bytes
1493 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
1494 if (VecSize <= 192) // 24 bytes
1495 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V6;
1496 if (VecSize <= 224) // 28 bytes
1497 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V7;
1498 if (VecSize <= 256) // 32 bytes
1499 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
1500 if (VecSize <= 288) // 36 bytes
1501 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V9;
1502 if (VecSize <= 320) // 40 bytes
1503 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V10;
1504 if (VecSize <= 352) // 44 bytes
1505 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V11;
1506 if (VecSize <= 384) // 48 bytes
1507 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V12;
1508 if (VecSize <= 512) // 64 bytes
1509 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
1510 if (VecSize <= 1024) // 128 bytes
1511 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;
1512
1513 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1514}
1515
1516static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) {
1517 if (VecSize <= 32) // 4 bytes
1518 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
1519 if (VecSize <= 64) // 8 bytes
1520 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
1521 if (VecSize <= 96) // 12 bytes
1522 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
1523 if (VecSize <= 128) // 16 bytes
1524 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
1525 if (VecSize <= 160) // 20 bytes
1526 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
1527 if (VecSize <= 192) // 24 bytes
1528 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V6;
1529 if (VecSize <= 224) // 28 bytes
1530 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V7;
1531 if (VecSize <= 256) // 32 bytes
1532 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
1533 if (VecSize <= 288) // 36 bytes
1534 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V9;
1535 if (VecSize <= 320) // 40 bytes
1536 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V10;
1537 if (VecSize <= 352) // 44 bytes
1538 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V11;
1539 if (VecSize <= 384) // 48 bytes
1540 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V12;
1541 if (VecSize <= 512) // 64 bytes
1542 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
1543 if (VecSize <= 1024) // 128 bytes
1544 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;
1545
1546 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1547}
1548
1549static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) {
1550 if (VecSize <= 64) // 8 bytes
1551 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
1552 if (VecSize <= 128) // 16 bytes
1553 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
1554 if (VecSize <= 256) // 32 bytes
1555 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
1556 if (VecSize <= 512) // 64 bytes
1557 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
1558 if (VecSize <= 1024) // 128 bytes
1559 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;
1560
1561 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1562}
1563
1564const MCInstrDesc &
1565SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize,
1566 bool IsSGPR) const {
1567 if (IsSGPR) {
1568 switch (EltSize) {
1569 case 32:
1570 return get(getIndirectSGPRWriteMovRelPseudo32(VecSize));
1571 case 64:
1572 return get(getIndirectSGPRWriteMovRelPseudo64(VecSize));
1573 default:
1574 llvm_unreachable("invalid reg indexing elt size");
1575 }
1576 }
1577
1578 assert(EltSize == 32 && "invalid reg indexing elt size");
1580}
1581
1582static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
1583 switch (Size) {
1584 case 4:
1585 return AMDGPU::SI_SPILL_S32_SAVE;
1586 case 8:
1587 return AMDGPU::SI_SPILL_S64_SAVE;
1588 case 12:
1589 return AMDGPU::SI_SPILL_S96_SAVE;
1590 case 16:
1591 return AMDGPU::SI_SPILL_S128_SAVE;
1592 case 20:
1593 return AMDGPU::SI_SPILL_S160_SAVE;
1594 case 24:
1595 return AMDGPU::SI_SPILL_S192_SAVE;
1596 case 28:
1597 return AMDGPU::SI_SPILL_S224_SAVE;
1598 case 32:
1599 return AMDGPU::SI_SPILL_S256_SAVE;
1600 case 36:
1601 return AMDGPU::SI_SPILL_S288_SAVE;
1602 case 40:
1603 return AMDGPU::SI_SPILL_S320_SAVE;
1604 case 44:
1605 return AMDGPU::SI_SPILL_S352_SAVE;
1606 case 48:
1607 return AMDGPU::SI_SPILL_S384_SAVE;
1608 case 64:
1609 return AMDGPU::SI_SPILL_S512_SAVE;
1610 case 128:
1611 return AMDGPU::SI_SPILL_S1024_SAVE;
1612 default:
1613 llvm_unreachable("unknown register size");
1614 }
1615}
1616
1617static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
1618 switch (Size) {
1619 case 2:
1620 return AMDGPU::SI_SPILL_V16_SAVE;
1621 case 4:
1622 return AMDGPU::SI_SPILL_V32_SAVE;
1623 case 8:
1624 return AMDGPU::SI_SPILL_V64_SAVE;
1625 case 12:
1626 return AMDGPU::SI_SPILL_V96_SAVE;
1627 case 16:
1628 return AMDGPU::SI_SPILL_V128_SAVE;
1629 case 20:
1630 return AMDGPU::SI_SPILL_V160_SAVE;
1631 case 24:
1632 return AMDGPU::SI_SPILL_V192_SAVE;
1633 case 28:
1634 return AMDGPU::SI_SPILL_V224_SAVE;
1635 case 32:
1636 return AMDGPU::SI_SPILL_V256_SAVE;
1637 case 36:
1638 return AMDGPU::SI_SPILL_V288_SAVE;
1639 case 40:
1640 return AMDGPU::SI_SPILL_V320_SAVE;
1641 case 44:
1642 return AMDGPU::SI_SPILL_V352_SAVE;
1643 case 48:
1644 return AMDGPU::SI_SPILL_V384_SAVE;
1645 case 64:
1646 return AMDGPU::SI_SPILL_V512_SAVE;
1647 case 128:
1648 return AMDGPU::SI_SPILL_V1024_SAVE;
1649 default:
1650 llvm_unreachable("unknown register size");
1651 }
1652}
1653
1654static unsigned getAVSpillSaveOpcode(unsigned Size) {
1655 switch (Size) {
1656 case 4:
1657 return AMDGPU::SI_SPILL_AV32_SAVE;
1658 case 8:
1659 return AMDGPU::SI_SPILL_AV64_SAVE;
1660 case 12:
1661 return AMDGPU::SI_SPILL_AV96_SAVE;
1662 case 16:
1663 return AMDGPU::SI_SPILL_AV128_SAVE;
1664 case 20:
1665 return AMDGPU::SI_SPILL_AV160_SAVE;
1666 case 24:
1667 return AMDGPU::SI_SPILL_AV192_SAVE;
1668 case 28:
1669 return AMDGPU::SI_SPILL_AV224_SAVE;
1670 case 32:
1671 return AMDGPU::SI_SPILL_AV256_SAVE;
1672 case 36:
1673 return AMDGPU::SI_SPILL_AV288_SAVE;
1674 case 40:
1675 return AMDGPU::SI_SPILL_AV320_SAVE;
1676 case 44:
1677 return AMDGPU::SI_SPILL_AV352_SAVE;
1678 case 48:
1679 return AMDGPU::SI_SPILL_AV384_SAVE;
1680 case 64:
1681 return AMDGPU::SI_SPILL_AV512_SAVE;
1682 case 128:
1683 return AMDGPU::SI_SPILL_AV1024_SAVE;
1684 default:
1685 llvm_unreachable("unknown register size");
1686 }
1687}
1688
1689static unsigned getWWMRegSpillSaveOpcode(unsigned Size,
1690 bool IsVectorSuperClass) {
1691 // Currently, there is only 32-bit WWM register spills needed.
1692 if (Size != 4)
1693 llvm_unreachable("unknown wwm register spill size");
1694
1695 if (IsVectorSuperClass)
1696 return AMDGPU::SI_SPILL_WWM_AV32_SAVE;
1697
1698 return AMDGPU::SI_SPILL_WWM_V32_SAVE;
1699}
1700
1702 Register Reg, const TargetRegisterClass *RC, unsigned Size,
1703 const SIMachineFunctionInfo &MFI) const {
1704 bool IsVectorSuperClass = RI.isVectorSuperClass(RC);
1705
1706 // Choose the right opcode if spilling a WWM register.
1708 return getWWMRegSpillSaveOpcode(Size, IsVectorSuperClass);
1709
1710 // TODO: Check if AGPRs are available
1711 if (ST.hasMAIInsts())
1712 return getAVSpillSaveOpcode(Size);
1713
1715}
1716
// Spill SrcReg into stack slot FrameIndex. SGPR spills use SI_SPILL_S*
// pseudos (typically lowered onto VGPR lanes rather than memory); vector
// registers use SI_SPILL_V*/AV* pseudos addressed off the stack pointer.
//
// NOTE(review): extraction dropped the lines opening this signature, the
// declaration of `MFI` (SIMachineFunctionInfo), the call that creates `MMO`,
// and the statement terminating the SGPR BuildMI chain — confirm against
// upstream SIInstrInfo.cpp.
    bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
    MachineInstr::MIFlag Flags) const {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = RI.getSpillSize(*RC);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    // m0 and exec have dedicated roles and must never be spilled.
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on number sgprs, so we need
    // to make sure we are using the correct register class.
    if (SrcReg.isVirtual() && SpillSize == 4) {
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
        .addReg(SrcReg, getKillRegState(isKill)) // data
        .addFrameIndex(FrameIndex)               // addr
        .addMemOperand(MMO)

    // SGPR spills are normally lowered onto VGPR lanes, not scratch memory.
    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  // Vector path: pick a V/AV/WWM save pseudo based on the register's flags
  // and the subtarget's AGPR support.
  unsigned Opcode =
      getVectorRegSpillSaveOpcode(VReg ? VReg : SrcReg, RC, SpillSize, *MFI);
  MFI->setHasSpilledVGPRs();

  BuildMI(MBB, MI, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset
      .addImm(0)                               // offset
      .addMemOperand(MMO);
}
1772
1773static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
1774 switch (Size) {
1775 case 4:
1776 return AMDGPU::SI_SPILL_S32_RESTORE;
1777 case 8:
1778 return AMDGPU::SI_SPILL_S64_RESTORE;
1779 case 12:
1780 return AMDGPU::SI_SPILL_S96_RESTORE;
1781 case 16:
1782 return AMDGPU::SI_SPILL_S128_RESTORE;
1783 case 20:
1784 return AMDGPU::SI_SPILL_S160_RESTORE;
1785 case 24:
1786 return AMDGPU::SI_SPILL_S192_RESTORE;
1787 case 28:
1788 return AMDGPU::SI_SPILL_S224_RESTORE;
1789 case 32:
1790 return AMDGPU::SI_SPILL_S256_RESTORE;
1791 case 36:
1792 return AMDGPU::SI_SPILL_S288_RESTORE;
1793 case 40:
1794 return AMDGPU::SI_SPILL_S320_RESTORE;
1795 case 44:
1796 return AMDGPU::SI_SPILL_S352_RESTORE;
1797 case 48:
1798 return AMDGPU::SI_SPILL_S384_RESTORE;
1799 case 64:
1800 return AMDGPU::SI_SPILL_S512_RESTORE;
1801 case 128:
1802 return AMDGPU::SI_SPILL_S1024_RESTORE;
1803 default:
1804 llvm_unreachable("unknown register size");
1805 }
1806}
1807
1808static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
1809 switch (Size) {
1810 case 2:
1811 return AMDGPU::SI_SPILL_V16_RESTORE;
1812 case 4:
1813 return AMDGPU::SI_SPILL_V32_RESTORE;
1814 case 8:
1815 return AMDGPU::SI_SPILL_V64_RESTORE;
1816 case 12:
1817 return AMDGPU::SI_SPILL_V96_RESTORE;
1818 case 16:
1819 return AMDGPU::SI_SPILL_V128_RESTORE;
1820 case 20:
1821 return AMDGPU::SI_SPILL_V160_RESTORE;
1822 case 24:
1823 return AMDGPU::SI_SPILL_V192_RESTORE;
1824 case 28:
1825 return AMDGPU::SI_SPILL_V224_RESTORE;
1826 case 32:
1827 return AMDGPU::SI_SPILL_V256_RESTORE;
1828 case 36:
1829 return AMDGPU::SI_SPILL_V288_RESTORE;
1830 case 40:
1831 return AMDGPU::SI_SPILL_V320_RESTORE;
1832 case 44:
1833 return AMDGPU::SI_SPILL_V352_RESTORE;
1834 case 48:
1835 return AMDGPU::SI_SPILL_V384_RESTORE;
1836 case 64:
1837 return AMDGPU::SI_SPILL_V512_RESTORE;
1838 case 128:
1839 return AMDGPU::SI_SPILL_V1024_RESTORE;
1840 default:
1841 llvm_unreachable("unknown register size");
1842 }
1843}
1844
1845static unsigned getAVSpillRestoreOpcode(unsigned Size) {
1846 switch (Size) {
1847 case 4:
1848 return AMDGPU::SI_SPILL_AV32_RESTORE;
1849 case 8:
1850 return AMDGPU::SI_SPILL_AV64_RESTORE;
1851 case 12:
1852 return AMDGPU::SI_SPILL_AV96_RESTORE;
1853 case 16:
1854 return AMDGPU::SI_SPILL_AV128_RESTORE;
1855 case 20:
1856 return AMDGPU::SI_SPILL_AV160_RESTORE;
1857 case 24:
1858 return AMDGPU::SI_SPILL_AV192_RESTORE;
1859 case 28:
1860 return AMDGPU::SI_SPILL_AV224_RESTORE;
1861 case 32:
1862 return AMDGPU::SI_SPILL_AV256_RESTORE;
1863 case 36:
1864 return AMDGPU::SI_SPILL_AV288_RESTORE;
1865 case 40:
1866 return AMDGPU::SI_SPILL_AV320_RESTORE;
1867 case 44:
1868 return AMDGPU::SI_SPILL_AV352_RESTORE;
1869 case 48:
1870 return AMDGPU::SI_SPILL_AV384_RESTORE;
1871 case 64:
1872 return AMDGPU::SI_SPILL_AV512_RESTORE;
1873 case 128:
1874 return AMDGPU::SI_SPILL_AV1024_RESTORE;
1875 default:
1876 llvm_unreachable("unknown register size");
1877 }
1878}
1879
1880static unsigned getWWMRegSpillRestoreOpcode(unsigned Size,
1881 bool IsVectorSuperClass) {
1882 // Currently, there is only 32-bit WWM register spills needed.
1883 if (Size != 4)
1884 llvm_unreachable("unknown wwm register spill size");
1885
1886 if (IsVectorSuperClass) // TODO: Always use this if there are AGPRs
1887 return AMDGPU::SI_SPILL_WWM_AV32_RESTORE;
1888
1889 return AMDGPU::SI_SPILL_WWM_V32_RESTORE;
1890}
1891
1893 Register Reg, const TargetRegisterClass *RC, unsigned Size,
1894 const SIMachineFunctionInfo &MFI) const {
1895 bool IsVectorSuperClass = RI.isVectorSuperClass(RC);
1896
1897 // Choose the right opcode if restoring a WWM register.
1899 return getWWMRegSpillRestoreOpcode(Size, IsVectorSuperClass);
1900
1901 // TODO: Check if AGPRs are available
1902 if (ST.hasMAIInsts())
1904
1905 assert(!RI.isAGPRClass(RC));
1907}
1908
// Reload DestReg from stack slot FrameIndex — the mirror of
// storeRegToStackSlot. SGPR reloads use SI_SPILL_S*_RESTORE pseudos; vector
// registers use SI_SPILL_V*/AV*_RESTORE pseudos addressed off the stack
// pointer.
//
// NOTE(review): extraction dropped the lines opening this signature, the
// declaration of `MFI` (SIMachineFunctionInfo), the call that creates `MMO`,
// and the statement terminating the SGPR BuildMI chain — confirm against
// upstream SIInstrInfo.cpp.
                                       Register DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       Register VReg, unsigned SubReg,
                                       MachineInstr::MIFlag Flags) const {
  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned SpillSize = RI.getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    // m0 and exec have dedicated roles and must never be reload targets.
    assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
    assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (DestReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      // 4-byte SGPR restores must target a numbered SGPR (not m0/exec).
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    // SGPR spills are normally lowered onto VGPR lanes, not scratch memory.
    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    BuildMI(MBB, MI, DL, OpDesc, DestReg)
        .addFrameIndex(FrameIndex) // addr
        .addMemOperand(MMO)

    return;
  }

  // Vector path: pick a V/AV/WWM restore pseudo based on the register's
  // flags and the subtarget's AGPR support.
  unsigned Opcode = getVectorRegSpillRestoreOpcode(VReg ? VReg : DestReg, RC,
                                                   SpillSize, *MFI);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
      .addFrameIndex(FrameIndex)           // vaddr
      .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
      .addImm(0)                           // offset
      .addMemOperand(MMO);
}
1960
1965
// Insert `Quantity` wait states worth of S_NOP instructions before MI.
// A single S_NOP covers up to MaxSNopCount waits, so the requested count is
// emitted greedily in maximal chunks.
1968 unsigned Quantity) const {
1969 DebugLoc DL = MBB.findDebugLoc(MI);
1970 unsigned MaxSNopCount = 1u << ST.getSNopBits();
1971 while (Quantity > 0) {
1972 unsigned Arg = std::min(Quantity, MaxSNopCount);
1973 Quantity -= Arg;
// S_NOP's immediate encodes the wait count minus one.
1974 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
1975 }
1976}
1977
// Append the appropriate return instruction for an entry function to MBB:
// S_ENDPGM when the function returns void, SI_RETURN_TO_EPILOG otherwise.
// Only emitted when the block has no successors and no terminator yet, so
// an explicit terminator elsewhere is never duplicated.
1979 auto *MF = MBB.getParent();
1980 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1981
1982 assert(Info->isEntryFunction());
1983
1984 if (MBB.succ_empty()) {
1985 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
1986 if (HasNoTerminator) {
1987 if (Info->returnsVoid()) {
1988 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
1989 } else {
1990 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
1991 }
1992 }
1993 }
1994}
1995
1999 const DebugLoc &DL) const {
2000 MachineFunction *MF = MBB.getParent();
2001 constexpr unsigned DoorbellIDMask = 0x3ff;
2002 constexpr unsigned ECQueueWaveAbort = 0x400;
2003
2004 MachineBasicBlock *TrapBB = &MBB;
2005 MachineBasicBlock *HaltLoopBB = MF->CreateMachineBasicBlock();
2006
2007 if (!MBB.succ_empty() || std::next(MI.getIterator()) != MBB.end()) {
2008 MBB.splitAt(MI, /*UpdateLiveIns=*/false);
2009 TrapBB = MF->CreateMachineBasicBlock();
2010 BuildMI(MBB, MI, DL, get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(TrapBB);
2011 MF->push_back(TrapBB);
2012 MBB.addSuccessor(TrapBB);
2013 }
2014 // Start with a `s_trap 2`, if we're in PRIV=1 and we need the workaround this
2015 // will be a nop.
2016 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_TRAP))
2017 .addImm(static_cast<unsigned>(GCNSubtarget::TrapID::LLVMAMDHSATrap));
2018 Register DoorbellReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2019 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_SENDMSG_RTN_B32),
2020 DoorbellReg)
2022 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::TTMP2)
2023 .addUse(AMDGPU::M0);
2024 Register DoorbellRegMasked =
2025 MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2026 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_AND_B32), DoorbellRegMasked)
2027 .addUse(DoorbellReg)
2028 .addImm(DoorbellIDMask);
2029 Register SetWaveAbortBit =
2030 MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2031 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_OR_B32), SetWaveAbortBit)
2032 .addUse(DoorbellRegMasked)
2033 .addImm(ECQueueWaveAbort);
2034 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2035 .addUse(SetWaveAbortBit);
2036 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_SENDMSG))
2038 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2039 .addUse(AMDGPU::TTMP2);
2040 BuildMI(*TrapBB, TrapBB->end(), DL, get(AMDGPU::S_BRANCH)).addMBB(HaltLoopBB);
2041 TrapBB->addSuccessor(HaltLoopBB);
2042
2043 BuildMI(*HaltLoopBB, HaltLoopBB->end(), DL, get(AMDGPU::S_SETHALT)).addImm(5);
2044 BuildMI(*HaltLoopBB, HaltLoopBB->end(), DL, get(AMDGPU::S_BRANCH))
2045 .addMBB(HaltLoopBB);
2046 MF->push_back(HaltLoopBB);
2047 HaltLoopBB->addSuccessor(HaltLoopBB);
2048
2049 return MBB.getNextNode();
2050}
2051
// Return the number of wait states the given instruction accounts for when
// resolving hazards: meta instructions count for zero, S_NOP counts for its
// immediate plus one, and everything else counts as one.
2053 switch (MI.getOpcode()) {
2054 default:
2055 if (MI.isMetaInstruction())
2056 return 0;
2057 return 1; // FIXME: Do wait states equal cycles?
2058
2059 case AMDGPU::S_NOP:
2060 return MI.getOperand(0).getImm() + 1;
2061 // SI_RETURN_TO_EPILOG is a fallthrough to code outside of the function. The
2062 // hazard, even if one exists, won't really be visible. Should we handle it?
2063 }
2064}
2065
2067 MachineBasicBlock &MBB = *MI.getParent();
2068 DebugLoc DL = MBB.findDebugLoc(MI);
2070 switch (MI.getOpcode()) {
2071 default: return TargetInstrInfo::expandPostRAPseudo(MI);
2072 case AMDGPU::S_MOV_B64_term:
2073 // This is only a terminator to get the correct spill code placement during
2074 // register allocation.
2075 MI.setDesc(get(AMDGPU::S_MOV_B64));
2076 break;
2077
2078 case AMDGPU::S_MOV_B32_term:
2079 // This is only a terminator to get the correct spill code placement during
2080 // register allocation.
2081 MI.setDesc(get(AMDGPU::S_MOV_B32));
2082 break;
2083
2084 case AMDGPU::S_XOR_B64_term:
2085 // This is only a terminator to get the correct spill code placement during
2086 // register allocation.
2087 MI.setDesc(get(AMDGPU::S_XOR_B64));
2088 break;
2089
2090 case AMDGPU::S_XOR_B32_term:
2091 // This is only a terminator to get the correct spill code placement during
2092 // register allocation.
2093 MI.setDesc(get(AMDGPU::S_XOR_B32));
2094 break;
2095 case AMDGPU::S_OR_B64_term:
2096 // This is only a terminator to get the correct spill code placement during
2097 // register allocation.
2098 MI.setDesc(get(AMDGPU::S_OR_B64));
2099 break;
2100 case AMDGPU::S_OR_B32_term:
2101 // This is only a terminator to get the correct spill code placement during
2102 // register allocation.
2103 MI.setDesc(get(AMDGPU::S_OR_B32));
2104 break;
2105
2106 case AMDGPU::S_ANDN2_B64_term:
2107 // This is only a terminator to get the correct spill code placement during
2108 // register allocation.
2109 MI.setDesc(get(AMDGPU::S_ANDN2_B64));
2110 break;
2111
2112 case AMDGPU::S_ANDN2_B32_term:
2113 // This is only a terminator to get the correct spill code placement during
2114 // register allocation.
2115 MI.setDesc(get(AMDGPU::S_ANDN2_B32));
2116 break;
2117
2118 case AMDGPU::S_AND_B64_term:
2119 // This is only a terminator to get the correct spill code placement during
2120 // register allocation.
2121 MI.setDesc(get(AMDGPU::S_AND_B64));
2122 break;
2123
2124 case AMDGPU::S_AND_B32_term:
2125 // This is only a terminator to get the correct spill code placement during
2126 // register allocation.
2127 MI.setDesc(get(AMDGPU::S_AND_B32));
2128 break;
2129
2130 case AMDGPU::S_AND_SAVEEXEC_B64_term:
2131 // This is only a terminator to get the correct spill code placement during
2132 // register allocation.
2133 MI.setDesc(get(AMDGPU::S_AND_SAVEEXEC_B64));
2134 break;
2135
2136 case AMDGPU::S_AND_SAVEEXEC_B32_term:
2137 // This is only a terminator to get the correct spill code placement during
2138 // register allocation.
2139 MI.setDesc(get(AMDGPU::S_AND_SAVEEXEC_B32));
2140 break;
2141
2142 case AMDGPU::SI_SPILL_S32_TO_VGPR:
2143 MI.setDesc(get(AMDGPU::V_WRITELANE_B32));
2144 break;
2145
2146 case AMDGPU::SI_RESTORE_S32_FROM_VGPR:
2147 MI.setDesc(get(AMDGPU::V_READLANE_B32));
2148 break;
2149 case AMDGPU::AV_MOV_B32_IMM_PSEUDO: {
2150 Register Dst = MI.getOperand(0).getReg();
2151 bool IsAGPR = SIRegisterInfo::isAGPRClass(RI.getPhysRegBaseClass(Dst));
2152 MI.setDesc(
2153 get(IsAGPR ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::V_MOV_B32_e32));
2154 break;
2155 }
2156 case AMDGPU::AV_MOV_B64_IMM_PSEUDO: {
2157 Register Dst = MI.getOperand(0).getReg();
2158 if (SIRegisterInfo::isAGPRClass(RI.getPhysRegBaseClass(Dst))) {
2159 int64_t Imm = MI.getOperand(1).getImm();
2160
2161 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
2162 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
2163 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstLo)
2166 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DstHi)
2167 .addImm(SignExtend64<32>(Imm >> 32))
2169 MI.eraseFromParent();
2170 break;
2171 }
2172
2173 [[fallthrough]];
2174 }
2175 case AMDGPU::V_MOV_B64_PSEUDO: {
2176 Register Dst = MI.getOperand(0).getReg();
2177 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
2178 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
2179
2180 const MCInstrDesc &Mov64Desc = get(AMDGPU::V_MOV_B64_e32);
2181 const TargetRegisterClass *Mov64RC = getRegClass(Mov64Desc, /*OpNum=*/0);
2182
2183 const MachineOperand &SrcOp = MI.getOperand(1);
2184 // FIXME: Will this work for 64-bit floating point immediates?
2185 assert(!SrcOp.isFPImm());
2186 if (ST.hasMovB64() && Mov64RC->contains(Dst)) {
2187 MI.setDesc(Mov64Desc);
2188 if (SrcOp.isReg() || isInlineConstant(MI, 1) ||
2189 isUInt<32>(SrcOp.getImm()) || ST.has64BitLiterals())
2190 break;
2191 }
2192 if (SrcOp.isImm()) {
2193 APInt Imm(64, SrcOp.getImm());
2194 APInt Lo(32, Imm.getLoBits(32).getZExtValue());
2195 APInt Hi(32, Imm.getHiBits(32).getZExtValue());
2196 const MCInstrDesc &PkMovDesc = get(AMDGPU::V_PK_MOV_B32);
2197 const TargetRegisterClass *PkMovRC = getRegClass(PkMovDesc, /*OpNum=*/0);
2198
2199 if (ST.hasPkMovB32() && Lo == Hi && isInlineConstant(Lo) &&
2200 PkMovRC->contains(Dst)) {
2201 BuildMI(MBB, MI, DL, PkMovDesc, Dst)
2203 .addImm(Lo.getSExtValue())
2205 .addImm(Lo.getSExtValue())
2206 .addImm(0) // op_sel_lo
2207 .addImm(0) // op_sel_hi
2208 .addImm(0) // neg_lo
2209 .addImm(0) // neg_hi
2210 .addImm(0); // clamp
2211 } else {
2212 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
2213 .addImm(Lo.getSExtValue())
2215 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
2216 .addImm(Hi.getSExtValue())
2218 }
2219 } else {
2220 assert(SrcOp.isReg());
2221 if (ST.hasPkMovB32() &&
2222 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) {
2223 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
2224 .addImm(SISrcMods::OP_SEL_1) // src0_mod
2225 .addReg(SrcOp.getReg())
2227 .addReg(SrcOp.getReg())
2228 .addImm(0) // op_sel_lo
2229 .addImm(0) // op_sel_hi
2230 .addImm(0) // neg_lo
2231 .addImm(0) // neg_hi
2232 .addImm(0); // clamp
2233 } else {
2234 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
2235 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
2237 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
2238 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
2240 }
2241 }
2242 MI.eraseFromParent();
2243 break;
2244 }
2245 case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
2247 break;
2248 }
2249 case AMDGPU::S_MOV_B64_IMM_PSEUDO: {
2250 const MachineOperand &SrcOp = MI.getOperand(1);
2251 assert(!SrcOp.isFPImm());
2252
2253 if (ST.has64BitLiterals()) {
2254 MI.setDesc(get(AMDGPU::S_MOV_B64));
2255 break;
2256 }
2257
2258 APInt Imm(64, SrcOp.getImm());
2259 if (Imm.isIntN(32) || isInlineConstant(Imm)) {
2260 MI.setDesc(get(AMDGPU::S_MOV_B64));
2261 break;
2262 }
2263
2264 Register Dst = MI.getOperand(0).getReg();
2265 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
2266 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
2267
2268 APInt Lo(32, Imm.getLoBits(32).getZExtValue());
2269 APInt Hi(32, Imm.getHiBits(32).getZExtValue());
2270 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstLo)
2271 .addImm(Lo.getSExtValue())
2273 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstHi)
2274 .addImm(Hi.getSExtValue())
2276 MI.eraseFromParent();
2277 break;
2278 }
2279 case AMDGPU::V_SET_INACTIVE_B32: {
2280 // Lower V_SET_INACTIVE_B32 to V_CNDMASK_B32.
2281 Register DstReg = MI.getOperand(0).getReg();
2282 BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
2283 .add(MI.getOperand(3))
2284 .add(MI.getOperand(4))
2285 .add(MI.getOperand(1))
2286 .add(MI.getOperand(2))
2287 .add(MI.getOperand(5));
2288 MI.eraseFromParent();
2289 break;
2290 }
2291 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1:
2292 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2:
2293 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3:
2294 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4:
2295 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5:
2296 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V6:
2297 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V7:
2298 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8:
2299 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V9:
2300 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V10:
2301 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V11:
2302 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V12:
2303 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16:
2304 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32:
2305 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1:
2306 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2:
2307 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3:
2308 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4:
2309 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5:
2310 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V6:
2311 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V7:
2312 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8:
2313 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V9:
2314 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V10:
2315 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V11:
2316 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V12:
2317 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16:
2318 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32:
2319 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1:
2320 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2:
2321 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4:
2322 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8:
2323 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: {
2324 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2);
2325
2326 unsigned Opc;
2327 if (RI.hasVGPRs(EltRC)) {
2328 Opc = AMDGPU::V_MOVRELD_B32_e32;
2329 } else {
2330 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64
2331 : AMDGPU::S_MOVRELD_B32;
2332 }
2333
2334 const MCInstrDesc &OpDesc = get(Opc);
2335 Register VecReg = MI.getOperand(0).getReg();
2336 bool IsUndef = MI.getOperand(1).isUndef();
2337 unsigned SubReg = MI.getOperand(3).getImm();
2338 assert(VecReg == MI.getOperand(1).getReg());
2339
2341 BuildMI(MBB, MI, DL, OpDesc)
2342 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2343 .add(MI.getOperand(2))
2345 .addReg(VecReg, RegState::Implicit | getUndefRegState(IsUndef));
2346
2347 const int ImpDefIdx =
2348 OpDesc.getNumOperands() + OpDesc.implicit_uses().size();
2349 const int ImpUseIdx = ImpDefIdx + 1;
2350 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
2351 MI.eraseFromParent();
2352 break;
2353 }
2354 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1:
2355 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2:
2356 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3:
2357 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4:
2358 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5:
2359 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V6:
2360 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V7:
2361 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8:
2362 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V9:
2363 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V10:
2364 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V11:
2365 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V12:
2366 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16:
2367 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: {
2368 assert(ST.useVGPRIndexMode());
2369 Register VecReg = MI.getOperand(0).getReg();
2370 bool IsUndef = MI.getOperand(1).isUndef();
2371 MachineOperand &Idx = MI.getOperand(3);
2372 Register SubReg = MI.getOperand(4).getImm();
2373
2374 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
2375 .add(Idx)
2377 SetOn->getOperand(3).setIsUndef();
2378
2379 const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect_write);
2381 BuildMI(MBB, MI, DL, OpDesc)
2382 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2383 .add(MI.getOperand(2))
2385 .addReg(VecReg, RegState::Implicit | getUndefRegState(IsUndef));
2386
2387 const int ImpDefIdx =
2388 OpDesc.getNumOperands() + OpDesc.implicit_uses().size();
2389 const int ImpUseIdx = ImpDefIdx + 1;
2390 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
2391
2392 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
2393
2394 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
2395
2396 MI.eraseFromParent();
2397 break;
2398 }
2399 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1:
2400 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2:
2401 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3:
2402 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4:
2403 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5:
2404 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V6:
2405 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V7:
2406 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8:
2407 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V9:
2408 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V10:
2409 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V11:
2410 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V12:
2411 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16:
2412 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: {
2413 assert(ST.useVGPRIndexMode());
2414 Register Dst = MI.getOperand(0).getReg();
2415 Register VecReg = MI.getOperand(1).getReg();
2416 bool IsUndef = MI.getOperand(1).isUndef();
2417 Register SubReg = MI.getOperand(3).getImm();
2418
2419 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
2420 .add(MI.getOperand(2))
2422 SetOn->getOperand(3).setIsUndef();
2423
2424 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_indirect_read))
2425 .addDef(Dst)
2426 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2427 .addReg(VecReg, RegState::Implicit | getUndefRegState(IsUndef));
2428
2429 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
2430
2431 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
2432
2433 MI.eraseFromParent();
2434 break;
2435 }
2436 case AMDGPU::SI_PC_ADD_REL_OFFSET: {
2437 MachineFunction &MF = *MBB.getParent();
2438 Register Reg = MI.getOperand(0).getReg();
2439 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
2440 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
2441 MachineOperand OpLo = MI.getOperand(1);
2442 MachineOperand OpHi = MI.getOperand(2);
2443
2444 // Create a bundle so these instructions won't be re-ordered by the
2445 // post-RA scheduler.
2446 MIBundleBuilder Bundler(MBB, MI);
2447 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
2448
2449 // What we want here is an offset from the value returned by s_getpc (which
2450 // is the address of the s_add_u32 instruction) to the global variable, but
2451 // since the encoding of $symbol starts 4 bytes after the start of the
2452 // s_add_u32 instruction, we end up with an offset that is 4 bytes too
2453 // small. This requires us to add 4 to the global variable offset in order
2454 // to compute the correct address. Similarly for the s_addc_u32 instruction,
2455 // the encoding of $symbol starts 12 bytes after the start of the s_add_u32
2456 // instruction.
2457
2458 int64_t Adjust = 0;
2459 if (ST.hasGetPCZeroExtension()) {
2460 // Fix up hardware that does not sign-extend the 48-bit PC value by
2461 // inserting: s_sext_i32_i16 reghi, reghi
2462 Bundler.append(
2463 BuildMI(MF, DL, get(AMDGPU::S_SEXT_I32_I16), RegHi).addReg(RegHi));
2464 Adjust += 4;
2465 }
2466
2467 if (OpLo.isGlobal())
2468 OpLo.setOffset(OpLo.getOffset() + Adjust + 4);
2469 Bundler.append(
2470 BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo).addReg(RegLo).add(OpLo));
2471
2472 if (OpHi.isGlobal())
2473 OpHi.setOffset(OpHi.getOffset() + Adjust + 12);
2474 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
2475 .addReg(RegHi)
2476 .add(OpHi));
2477
2478 finalizeBundle(MBB, Bundler.begin());
2479
2480 MI.eraseFromParent();
2481 break;
2482 }
2483 case AMDGPU::SI_PC_ADD_REL_OFFSET64: {
2484 MachineFunction &MF = *MBB.getParent();
2485 Register Reg = MI.getOperand(0).getReg();
2486 MachineOperand Op = MI.getOperand(1);
2487
2488 // Create a bundle so these instructions won't be re-ordered by the
2489 // post-RA scheduler.
2490 MIBundleBuilder Bundler(MBB, MI);
2491 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
2492 if (Op.isGlobal())
2493 Op.setOffset(Op.getOffset() + 4);
2494 Bundler.append(
2495 BuildMI(MF, DL, get(AMDGPU::S_ADD_U64), Reg).addReg(Reg).add(Op));
2496
2497 finalizeBundle(MBB, Bundler.begin());
2498
2499 MI.eraseFromParent();
2500 break;
2501 }
2502 case AMDGPU::ENTER_STRICT_WWM: {
2503 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2504 // Whole Wave Mode is entered.
2505 MI.setDesc(get(LMC.OrSaveExecOpc));
2506 break;
2507 }
2508 case AMDGPU::ENTER_STRICT_WQM: {
2509 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2510 // STRICT_WQM is entered.
2511 BuildMI(MBB, MI, DL, get(LMC.MovOpc), MI.getOperand(0).getReg())
2512 .addReg(LMC.ExecReg);
2513 BuildMI(MBB, MI, DL, get(LMC.WQMOpc), LMC.ExecReg).addReg(LMC.ExecReg);
2514
2515 MI.eraseFromParent();
2516 break;
2517 }
2518 case AMDGPU::EXIT_STRICT_WWM:
2519 case AMDGPU::EXIT_STRICT_WQM: {
2520 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2521 // WWM/STRICT_WQM is exited.
2522 MI.setDesc(get(LMC.MovOpc));
2523 break;
2524 }
2525 case AMDGPU::SI_RETURN: {
2526 const MachineFunction *MF = MBB.getParent();
2527 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2528 const SIRegisterInfo *TRI = ST.getRegisterInfo();
2529 // Hiding the return address use with SI_RETURN may lead to extra kills in
2530 // the function and missing live-ins. We are fine in practice because callee
2531 // saved register handling ensures the register value is restored before
2532 // RET, but we need the undef flag here to appease the MachineVerifier
2533 // liveness checks.
2535 BuildMI(MBB, MI, DL, get(AMDGPU::S_SETPC_B64_return))
2536 .addReg(TRI->getReturnAddressReg(*MF), RegState::Undef);
2537
2538 MIB.copyImplicitOps(MI);
2539 MI.eraseFromParent();
2540 break;
2541 }
2542
2543 case AMDGPU::S_MUL_U64_U32_PSEUDO:
2544 case AMDGPU::S_MUL_I64_I32_PSEUDO:
2545 MI.setDesc(get(AMDGPU::S_MUL_U64));
2546 break;
2547
2548 case AMDGPU::S_GETPC_B64_pseudo:
2549 MI.setDesc(get(AMDGPU::S_GETPC_B64));
2550 if (ST.hasGetPCZeroExtension()) {
2551 Register Dst = MI.getOperand(0).getReg();
2552 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
2553 // Fix up hardware that does not sign-extend the 48-bit PC value by
2554 // inserting: s_sext_i32_i16 dsthi, dsthi
2555 BuildMI(MBB, std::next(MI.getIterator()), DL, get(AMDGPU::S_SEXT_I32_I16),
2556 DstHi)
2557 .addReg(DstHi);
2558 }
2559 break;
2560
2561 case AMDGPU::V_MAX_BF16_PSEUDO_e64: {
2562 assert(ST.hasBF16PackedInsts());
2563 MI.setDesc(get(AMDGPU::V_PK_MAX_NUM_BF16));
2564 MI.addOperand(MachineOperand::CreateImm(0)); // op_sel
2565 MI.addOperand(MachineOperand::CreateImm(0)); // neg_lo
2566 MI.addOperand(MachineOperand::CreateImm(0)); // neg_hi
2567 auto Op0 = getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
2568 Op0->setImm(Op0->getImm() | SISrcMods::OP_SEL_1);
2569 auto Op1 = getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
2570 Op1->setImm(Op1->getImm() | SISrcMods::OP_SEL_1);
2571 break;
2572 }
2573
2574 case AMDGPU::GET_STACK_BASE:
2575 // The stack starts at offset 0 unless we need to reserve some space at the
2576 // bottom.
2577 if (ST.getFrameLowering()->mayReserveScratchForCWSR(*MBB.getParent())) {
2578 // When CWSR is used in dynamic VGPR mode, the trap handler needs to save
2579 // some of the VGPRs. The size of the required scratch space has already
2580 // been computed by prolog epilog insertion.
2581 const SIMachineFunctionInfo *MFI =
2582 MBB.getParent()->getInfo<SIMachineFunctionInfo>();
2583 unsigned VGPRSize = MFI->getScratchReservedForDynamicVGPRs();
2584 Register DestReg = MI.getOperand(0).getReg();
2585 BuildMI(MBB, MI, DL, get(AMDGPU::S_GETREG_B32), DestReg)
2588 // The MicroEngine ID is 0 for the graphics queue, and 1 or 2 for compute
2589 // (3 is unused, so we ignore it). Unfortunately, S_GETREG doesn't set
2590 // SCC, so we need to check for 0 manually.
2591 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32)).addImm(0).addReg(DestReg);
2592 // Change the implicit-def of SCC to an explicit use (but first remove
2593 // the dead flag if present).
2594 MI.getOperand(MI.getNumExplicitOperands()).setIsDead(false);
2595 MI.getOperand(MI.getNumExplicitOperands()).setIsUse();
2596 MI.setDesc(get(AMDGPU::S_CMOVK_I32));
2597 MI.addOperand(MachineOperand::CreateImm(VGPRSize));
2598 } else {
2599 MI.setDesc(get(AMDGPU::S_MOV_B32));
2600 MI.addOperand(MachineOperand::CreateImm(0));
2601 MI.removeOperand(
2602 MI.getNumExplicitOperands()); // Drop implicit def of SCC.
2603 }
2604 break;
2605 }
2606
2607 return true;
2608}
2609
2612 unsigned SubIdx,
2613 const MachineInstr &Orig) const {
2614
2615 // Try shrinking the instruction to remat only the part needed for current
2616 // context.
2617 // TODO: Handle more cases.
2618 unsigned Opcode = Orig.getOpcode();
2619 switch (Opcode) {
2620 case AMDGPU::S_LOAD_DWORDX16_IMM:
2621 case AMDGPU::S_LOAD_DWORDX8_IMM: {
2622 if (SubIdx != 0)
2623 break;
2624
2625 if (I == MBB.end())
2626 break;
2627
2628 if (I->isBundled())
2629 break;
2630
2631 // Look for a single use of the register that is also a subreg.
2632 Register RegToFind = Orig.getOperand(0).getReg();
2633 MachineOperand *UseMO = nullptr;
2634 for (auto &CandMO : I->operands()) {
2635 if (!CandMO.isReg() || CandMO.getReg() != RegToFind || CandMO.isDef())
2636 continue;
2637 if (UseMO) {
2638 UseMO = nullptr;
2639 break;
2640 }
2641 UseMO = &CandMO;
2642 }
2643 if (!UseMO || UseMO->getSubReg() == AMDGPU::NoSubRegister)
2644 break;
2645
2646 unsigned Offset = RI.getSubRegIdxOffset(UseMO->getSubReg());
2647 unsigned SubregSize = RI.getSubRegIdxSize(UseMO->getSubReg());
2648
2649 MachineFunction *MF = MBB.getParent();
2650 MachineRegisterInfo &MRI = MF->getRegInfo();
2651 assert(MRI.use_nodbg_empty(DestReg) && "DestReg should have no users yet.");
2652
2653 unsigned NewOpcode = -1;
2654 if (SubregSize == 256)
2655 NewOpcode = AMDGPU::S_LOAD_DWORDX8_IMM;
2656 else if (SubregSize == 128)
2657 NewOpcode = AMDGPU::S_LOAD_DWORDX4_IMM;
2658 else
2659 break;
2660
2661 const MCInstrDesc &TID = get(NewOpcode);
2662 const TargetRegisterClass *NewRC =
2663 RI.getAllocatableClass(getRegClass(TID, 0));
2664 MRI.setRegClass(DestReg, NewRC);
2665
2666 UseMO->setReg(DestReg);
2667 UseMO->setSubReg(AMDGPU::NoSubRegister);
2668
2669 // Use a smaller load with the desired size, possibly with updated offset.
2670 MachineInstr *MI = MF->CloneMachineInstr(&Orig);
2671 MI->setDesc(TID);
2672 MI->getOperand(0).setReg(DestReg);
2673 MI->getOperand(0).setSubReg(AMDGPU::NoSubRegister);
2674 if (Offset) {
2675 MachineOperand *OffsetMO = getNamedOperand(*MI, AMDGPU::OpName::offset);
2676 int64_t FinalOffset = OffsetMO->getImm() + Offset / 8;
2677 OffsetMO->setImm(FinalOffset);
2678 }
2680 for (const MachineMemOperand *MemOp : Orig.memoperands())
2681 NewMMOs.push_back(MF->getMachineMemOperand(MemOp, MemOp->getPointerInfo(),
2682 SubregSize / 8));
2683 MI->setMemRefs(*MF, NewMMOs);
2684
2685 MBB.insert(I, MI);
2686 return;
2687 }
2688
2689 default:
2690 break;
2691 }
2692
2693 TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig);
2694}
2695
2696std::pair<MachineInstr*, MachineInstr*>
2698 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
2699
2700 if (ST.hasMovB64() && ST.hasFeature(AMDGPU::FeatureDPALU_DPP) &&
2702 ST, getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl)->getImm())) {
2703 MI.setDesc(get(AMDGPU::V_MOV_B64_dpp));
2704 return std::pair(&MI, nullptr);
2705 }
2706
2707 MachineBasicBlock &MBB = *MI.getParent();
2708 DebugLoc DL = MBB.findDebugLoc(MI);
2709 MachineFunction *MF = MBB.getParent();
2710 MachineRegisterInfo &MRI = MF->getRegInfo();
2711 Register Dst = MI.getOperand(0).getReg();
2712 unsigned Part = 0;
2713 MachineInstr *Split[2];
2714
2715 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
2716 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
2717 if (Dst.isPhysical()) {
2718 MovDPP.addDef(RI.getSubReg(Dst, Sub));
2719 } else {
2720 assert(MRI.isSSA());
2721 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2722 MovDPP.addDef(Tmp);
2723 }
2724
2725 for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
2726 const MachineOperand &SrcOp = MI.getOperand(I);
2727 assert(!SrcOp.isFPImm());
2728 if (SrcOp.isImm()) {
2729 APInt Imm(64, SrcOp.getImm());
2730 Imm.ashrInPlace(Part * 32);
2731 MovDPP.addImm(Imm.getLoBits(32).getZExtValue());
2732 } else {
2733 assert(SrcOp.isReg());
2734 Register Src = SrcOp.getReg();
2735 if (Src.isPhysical())
2736 MovDPP.addReg(RI.getSubReg(Src, Sub));
2737 else
2738 MovDPP.addReg(Src, getUndefRegState(SrcOp.isUndef()), Sub);
2739 }
2740 }
2741
2742 for (const MachineOperand &MO : llvm::drop_begin(MI.explicit_operands(), 3))
2743 MovDPP.addImm(MO.getImm());
2744
2745 Split[Part] = MovDPP;
2746 ++Part;
2747 }
2748
2749 if (Dst.isVirtual())
2750 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
2751 .addReg(Split[0]->getOperand(0).getReg())
2752 .addImm(AMDGPU::sub0)
2753 .addReg(Split[1]->getOperand(0).getReg())
2754 .addImm(AMDGPU::sub1);
2755
2756 MI.eraseFromParent();
2757 return std::pair(Split[0], Split[1]);
2758}
2759
// Report WWM_COPY as a register copy (dest = operand 0, source = operand 1)
// to the generic copy-analysis hooks; every other opcode is left to the
// target-independent handling.
// NOTE(review): the signature line is missing from this extraction.
2760 std::optional<DestSourcePair>
2762 if (MI.getOpcode() == AMDGPU::WWM_COPY)
2763 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
2764
2765 return std::nullopt;
2766}
2767
// When commuting src0 and src1 of MI, their source-modifier immediate
// operands must travel with them. Swap the two modifier immediates in
// place. Returns false when the instruction carries no src0 modifiers
// (nothing to swap), true otherwise.
2769 AMDGPU::OpName Src0OpName,
2770 MachineOperand &Src1,
2771 AMDGPU::OpName Src1OpName) const {
2772 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
2773 if (!Src0Mods)
2774 return false;
2775
2776 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
2777 assert(Src1Mods &&
2778 "All commutable instructions have both src0 and src1 modifiers");
2779
2780 int Src0ModsVal = Src0Mods->getImm();
2781 int Src1ModsVal = Src1Mods->getImm();
2782
2783 Src1Mods->setImm(Src0ModsVal);
2784 Src0Mods->setImm(Src1ModsVal);
2785 return true;
2786}
2787
// Commute a register operand with a non-register operand (immediate, frame
// index, or global address), rewriting both operands in place. The register
// state flags (kill/dead/undef/debug) and the subregister index move with
// the register to its new slot. Returns &MI on success, nullptr when the
// non-register operand kind is not supported.
2789 MachineOperand &RegOp,
2790 MachineOperand &NonRegOp) {
// Save the register's identity and flags before RegOp is overwritten.
2791 Register Reg = RegOp.getReg();
2792 unsigned SubReg = RegOp.getSubReg();
2793 bool IsKill = RegOp.isKill();
2794 bool IsDead = RegOp.isDead();
2795 bool IsUndef = RegOp.isUndef();
2796 bool IsDebug = RegOp.isDebug();
2797
2798 if (NonRegOp.isImm())
2799 RegOp.ChangeToImmediate(NonRegOp.getImm());
2800 else if (NonRegOp.isFI())
2801 RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
2802 else if (NonRegOp.isGlobal()) {
2803 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(),
2804 NonRegOp.getTargetFlags());
2805 } else
2806 return nullptr;
2807
2808 // Make sure we don't reinterpret a subreg index in the target flags.
2809 RegOp.setTargetFlags(NonRegOp.getTargetFlags());
2810
2811 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
2812 NonRegOp.setSubReg(SubReg);
2813
2814 return &MI;
2816
// Commute two immediate operands of MI, exchanging both their values and
// their target flags. Always succeeds and returns &MI.
2818 MachineOperand &NonRegOp1,
2819 MachineOperand &NonRegOp2) {
2820 unsigned TargetFlags = NonRegOp1.getTargetFlags();
2821 int64_t NonRegVal = NonRegOp1.getImm();
2822
2823 NonRegOp1.setImm(NonRegOp2.getImm());
2824 NonRegOp2.setImm(NonRegVal);
2825 NonRegOp1.setTargetFlags(NonRegOp2.getTargetFlags());
2826 NonRegOp2.setTargetFlags(TargetFlags);
2827 return &MI;
2828}
2829
// Return true if swapping the operands at OpIdx0 and OpIdx1 of MI would still
// yield a legal instruction, considering register-class constraints of the
// destination slots, immediate-operand legality, and (for VALU) literal
// placement restrictions.
bool SIInstrInfo::isLegalToSwap(const MachineInstr &MI, unsigned OpIdx0,
                                unsigned OpIdx1) const {
  const MCInstrDesc &InstDesc = MI.getDesc();
  const MCOperandInfo &OpInfo0 = InstDesc.operands()[OpIdx0];
  const MCOperandInfo &OpInfo1 = InstDesc.operands()[OpIdx1];

  unsigned Opc = MI.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);

  const MachineOperand &MO0 = MI.getOperand(OpIdx0);
  const MachineOperand &MO1 = MI.getOperand(OpIdx1);

  // Swap doesn't breach constant bus or literal limits
  // It may move literal to position other than src0, this is not allowed
  // pre-gfx10 However, most test cases need literals in Src0 for VOP
  // FIXME: After gfx9, literal can be in place other than Src0
  if (isVALU(MI)) {
    // A non-register operand landing in the src0 slot must be an inline
    // constant for the slot it came from; otherwise reject the swap.
    if ((int)OpIdx0 == Src0Idx && !MO0.isReg() &&
        !isInlineConstant(MO0, OpInfo1))
      return false;
    if ((int)OpIdx1 == Src0Idx && !MO1.isReg() &&
        !isInlineConstant(MO1, OpInfo0))
      return false;
  }

  // MO0 is a register moving into slot OpIdx1: it must satisfy that slot's
  // register class (a class-less slot is only acceptable when its operand
  // type is unknown), and MO1, if also a register, must be legal in OpIdx0.
  if ((int)OpIdx1 != Src0Idx && MO0.isReg()) {
    if (OpInfo1.RegClass == -1)
      return OpInfo1.OperandType == MCOI::OPERAND_UNKNOWN;
    return isLegalRegOperand(MI, OpIdx1, MO0) &&
           (!MO1.isReg() || isLegalRegOperand(MI, OpIdx0, MO1));
  }
  // Symmetric case: MO1 is a register moving into slot OpIdx0.
  if ((int)OpIdx0 != Src0Idx && MO1.isReg()) {
    if (OpInfo0.RegClass == -1)
      return OpInfo0.OperandType == MCOI::OPERAND_UNKNOWN;
    return (!MO0.isReg() || isLegalRegOperand(MI, OpIdx1, MO0)) &&
           isLegalRegOperand(MI, OpIdx0, MO1);
  }

  // No need to check 64-bit literals since swapping does not bring new
  // 64-bit literals into current instruction to fold to 32-bit

  // Remaining case: MO0 is a non-register moving into slot OpIdx1.
  return isImmOperandLegal(MI, OpIdx1, MO0);
}
2873
                                                   unsigned Src0Idx,
                                                   unsigned Src1Idx) const {
  // Commute MI's src0/src1 operands in place (the NewMI form is unsupported),
  // handling reg<->reg, reg<->imm and imm<->imm pairs, then install the
  // commuted opcode and swap any source-modifier / SDWA sel operands.
  // Returns nullptr if the instruction cannot be commuted.
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  int CommutedOpcode = commuteOpcode(Opc);
  if (CommutedOpcode == -1)
    return nullptr; // Opcode has no commuted form.

  // Normalize so Src0Idx is the lower index.
  if (Src0Idx > Src1Idx)
    std::swap(Src0Idx, Src1Idx);

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
             static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
             static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

  if (!isLegalToSwap(MI, Src0Idx, Src1Idx))
    return nullptr;

  MachineInstr *CommutedMI = nullptr;
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);
  if (Src0.isReg() && Src1.isReg()) {
    // Be sure to copy the source modifiers to the right place.
    CommutedMI =
        TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
  } else if (Src0.isReg() && !Src1.isReg()) {
    CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  } else if (!Src0.isReg() && Src1.isReg()) {
    CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
  } else if (Src0.isImm() && Src1.isImm()) {
    CommutedMI = swapImmOperands(MI, Src0, Src1);
  } else {
    // FIXME: Found two non registers to commute. This does happen.
    return nullptr;
  }

  if (CommutedMI) {
    // Keep modifier and SDWA-select operands paired with the values they
    // modified before the swap.
    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
                        Src1, AMDGPU::OpName::src1_modifiers);

    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_sel, Src1,
                        AMDGPU::OpName::src1_sel);

    CommutedMI->setDesc(get(CommutedOpcode));
  }

  return CommutedMI;
}
2926
// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  // Delegate to the MCInstrDesc-based overload below.
  return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
}
2935
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  // Only instructions marked commutable that have both named src0 and src1
  // operands can be commuted.
  if (!Desc.isCommutable())
    return false;

  unsigned Opc = Desc.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  // Reconcile the caller's requested indices with the actual src0/src1 slots.
  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}
2953
                                          int64_t BrOffset) const {
  // Return true if a branch of opcode BranchOp can encode the given byte
  // offset directly (i.e. no long-branch expansion is needed).
  // BranchRelaxation should never have to check s_setpc_b64 or s_add_pc_i64
  // because its dest block is unanalyzable.
  assert(isSOPP(BranchOp) || isSOPK(BranchOp));

  // Convert to dwords.
  BrOffset /= 4;

  // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
  // from the next instruction.
  BrOffset -= 1;

  // In range iff the dword offset fits in BranchOffsetBits (16 by default;
  // narrowable via -amdgpu-s-branch-bits for testing).
  return isIntN(BranchOffsetBits, BrOffset);
}
2969
  // The branch target is the first (MBB) operand of the branch instruction.
  return MI.getOperand(0).getMBB();
}
2974
  // Scan the block's terminators for the structured control-flow pseudos
  // SI_IF / SI_ELSE / SI_LOOP, which mark lane-divergent branching.
  for (const MachineInstr &MI : MBB->terminators()) {
    if (MI.getOpcode() == AMDGPU::SI_IF || MI.getOpcode() == AMDGPU::SI_ELSE ||
        MI.getOpcode() == AMDGPU::SI_LOOP)
      return true;
  }
  return false;
}
2983
                                       MachineBasicBlock &DestBB,
                                       MachineBasicBlock &RestoreBB,
                                       const DebugLoc &DL, int64_t BrOffset,
                                       RegScavenger *RS) const {
  // Expand an out-of-range unconditional branch into PC-relative arithmetic:
  // a single s_add_pc_i64 on subtargets that have it, otherwise
  // s_getpc_b64 + 64-bit add + s_setpc_b64, scavenging (or emergency-
  // spilling) an SGPR pair to hold the computed target address.
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  // NOTE(review): a line appears to be dropped here by source extraction
  // (the MFI used below for getLongBranchReservedReg is initialized on it).
  auto I = MBB.end();
  auto &MCCtx = MF->getContext();

  if (ST.useAddPC64Inst()) {
    // The 64-bit displacement is DestBB minus the address just after the
    // s_add_pc_i64 (marked with PostAddPCLabel), resolved via a temp symbol.
    MCSymbol *Offset =
        MCCtx.createTempSymbol("offset", /*AlwaysAddSuffix=*/true);
    auto AddPC = BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_PC_I64))
    MCSymbol *PostAddPCLabel =
        MCCtx.createTempSymbol("post_addpc", /*AlwaysAddSuffix=*/true);
    AddPC->setPostInstrSymbol(*MF, PostAddPCLabel);
    auto *OffsetExpr = MCBinaryExpr::createSub(
        MCSymbolRefExpr::create(DestBB.getSymbol(), MCCtx),
        MCSymbolRefExpr::create(PostAddPCLabel, MCCtx), MCCtx);
    Offset->setVariableValue(OffsetExpr);
    return;
  }

  assert(RS && "RegScavenger required for long branching");

  // FIXME: Virtual register workaround for RegScavenger not working with empty
  // blocks.
  Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  // Note: as this is used after hazard recognizer we need to apply some hazard
  // workarounds directly.
  const bool FlushSGPRWrites = (ST.isWave64() && ST.hasVALUMaskWriteHazard()) ||
                               ST.hasVALUReadSGPRHazard();
  auto ApplyHazardWorkarounds = [this, &MBB, &I, &DL, FlushSGPRWrites]() {
    if (FlushSGPRWrites)
      BuildMI(MBB, I, DL, get(AMDGPU::S_WAITCNT_DEPCTR))
  };

  // We need to compute the offset relative to the instruction immediately after
  // s_getpc_b64. Insert pc arithmetic code before last terminator.
  MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
  ApplyHazardWorkarounds();

  MCSymbol *PostGetPCLabel =
      MCCtx.createTempSymbol("post_getpc", /*AlwaysAddSuffix=*/true);
  GetPC->setPostInstrSymbol(*MF, PostGetPCLabel);

  // 64-bit add of the (lo, hi) halves of the symbolic offset to the PC pair.
  MCSymbol *OffsetLo =
      MCCtx.createTempSymbol("offset_lo", /*AlwaysAddSuffix=*/true);
  MCSymbol *OffsetHi =
      MCCtx.createTempSymbol("offset_hi", /*AlwaysAddSuffix=*/true);
  BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, {}, AMDGPU::sub0)
      .addSym(OffsetLo, MO_FAR_BRANCH_OFFSET);
  BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, {}, AMDGPU::sub1)
      .addSym(OffsetHi, MO_FAR_BRANCH_OFFSET);
  ApplyHazardWorkarounds();

  // Insert the indirect branch after the other terminator.
  BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
      .addReg(PCReg);

  // If a spill is needed for the pc register pair, we need to insert a spill
  // restore block right before the destination block, and insert a short branch
  // into the old destination block's fallthrough predecessor.
  // e.g.:
  //
  // s_cbranch_scc0 skip_long_branch:
  //
  // long_branch_bb:
  //   spill s[8:9]
  //   s_getpc_b64 s[8:9]
  //   s_add_u32 s8, s8, restore_bb
  //   s_addc_u32 s9, s9, 0
  //   s_setpc_b64 s[8:9]
  //
  // skip_long_branch:
  //   foo;
  //
  // .....
  //
  // dest_bb_fallthrough_predecessor:
  // bar;
  // s_branch dest_bb
  //
  // restore_bb:
  //  restore s[8:9]
  //  fallthrough dest_bb
  //
  // dest_bb:
  //   buzz;

  Register LongBranchReservedReg = MFI->getLongBranchReservedReg();
  Register Scav;

  // If we've previously reserved a register for long branches
  // avoid running the scavenger and just use those registers
  if (LongBranchReservedReg) {
    RS->enterBasicBlock(MBB);
    Scav = LongBranchReservedReg;
  } else {
    RS->enterBasicBlockEnd(MBB);
    Scav = RS->scavengeRegisterBackwards(
        AMDGPU::SReg_64RegClass, MachineBasicBlock::iterator(GetPC),
        /* RestoreAfter */ false, 0, /* AllowSpill */ false);
  }
  if (Scav) {
    // A free SGPR pair was found: rewrite the virtual PC pair to it.
    RS->setRegUsed(Scav);
    MRI.replaceRegWith(PCReg, Scav);
    MRI.clearVirtRegs();
  } else {
    // As SGPR needs VGPR to be spilled, we reuse the slot of temporary VGPR for
    // SGPR spill.
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    TRI->spillEmergencySGPR(GetPC, RestoreBB, AMDGPU::SGPR0_SGPR1, RS);
    MRI.replaceRegWith(PCReg, AMDGPU::SGPR0_SGPR1);
    MRI.clearVirtRegs();
  }

  // When spilling, branch to RestoreBB (which restores the pair and falls
  // through to the destination) instead of directly to DestBB.
  MCSymbol *DestLabel = Scav ? DestBB.getSymbol() : RestoreBB.getSymbol();
  // Now, the distance could be defined.
  // NOTE(review): a line appears to be dropped here by source extraction
  // (the MCBinaryExpr::createSub that defines `Offset` from the two labels).
      MCSymbolRefExpr::create(DestLabel, MCCtx),
      MCSymbolRefExpr::create(PostGetPCLabel, MCCtx), MCCtx);
  // Add offset assignments.
  auto *Mask = MCConstantExpr::create(0xFFFFFFFFULL, MCCtx);
  OffsetLo->setVariableValue(MCBinaryExpr::createAnd(Offset, Mask, MCCtx));
  auto *ShAmt = MCConstantExpr::create(32, MCCtx);
  OffsetHi->setVariableValue(MCBinaryExpr::createAShr(Offset, ShAmt, MCCtx));
}
3128
3129unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
3130 switch (Cond) {
3131 case SIInstrInfo::SCC_TRUE:
3132 return AMDGPU::S_CBRANCH_SCC1;
3133 case SIInstrInfo::SCC_FALSE:
3134 return AMDGPU::S_CBRANCH_SCC0;
3135 case SIInstrInfo::VCCNZ:
3136 return AMDGPU::S_CBRANCH_VCCNZ;
3137 case SIInstrInfo::VCCZ:
3138 return AMDGPU::S_CBRANCH_VCCZ;
3139 case SIInstrInfo::EXECNZ:
3140 return AMDGPU::S_CBRANCH_EXECNZ;
3141 case SIInstrInfo::EXECZ:
3142 return AMDGPU::S_CBRANCH_EXECZ;
3143 default:
3144 llvm_unreachable("invalid branch predicate");
3145 }
3146}
3147
3148SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
3149 switch (Opcode) {
3150 case AMDGPU::S_CBRANCH_SCC0:
3151 return SCC_FALSE;
3152 case AMDGPU::S_CBRANCH_SCC1:
3153 return SCC_TRUE;
3154 case AMDGPU::S_CBRANCH_VCCNZ:
3155 return VCCNZ;
3156 case AMDGPU::S_CBRANCH_VCCZ:
3157 return VCCZ;
3158 case AMDGPU::S_CBRANCH_EXECNZ:
3159 return EXECNZ;
3160 case AMDGPU::S_CBRANCH_EXECZ:
3161 return EXECZ;
3162 default:
3163 return INVALID_BR;
3164 }
3165}
3166
                                MachineBasicBlock *&FBB,
                                bool AllowModify) const {
  // Recognize at most "conditional branch [+ unconditional branch]"
  // terminator shapes starting at I. Returns true when the sequence cannot
  // be analyzed (per the analyzeBranch contract).
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    // Unconditional Branch
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  BranchPredicate Pred = getBranchPredicate(I->getOpcode());
  if (Pred == INVALID_BR)
    return true; // Not a recognized conditional branch.

  MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
  Cond.push_back(MachineOperand::CreateImm(Pred));
  Cond.push_back(I->getOperand(1)); // Save the branch register.

  ++I;

  if (I == MBB.end()) {
    // Conditional branch followed by fall-through.
    TBB = CondBB;
    return false;
  }

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    // Conditional branch followed by an unconditional branch.
    TBB = CondBB;
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  return true;
}
3203
                                MachineBasicBlock *&FBB,
                                bool AllowModify) const {
  // Skip over the exec-mask manipulation pseudos that are artificially marked
  // as terminators, then hand the real branch sequence to analyzeBranchImpl.
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();
  auto E = MBB.end();
  if (I == E)
    return false;

  // Skip over the instructions that are artificially terminators for special
  // exec management.
  while (I != E && !I->isBranch() && !I->isReturn()) {
    switch (I->getOpcode()) {
    case AMDGPU::S_MOV_B64_term:
    case AMDGPU::S_XOR_B64_term:
    case AMDGPU::S_OR_B64_term:
    case AMDGPU::S_ANDN2_B64_term:
    case AMDGPU::S_AND_B64_term:
    case AMDGPU::S_AND_SAVEEXEC_B64_term:
    case AMDGPU::S_MOV_B32_term:
    case AMDGPU::S_XOR_B32_term:
    case AMDGPU::S_OR_B32_term:
    case AMDGPU::S_ANDN2_B32_term:
    case AMDGPU::S_AND_B32_term:
    case AMDGPU::S_AND_SAVEEXEC_B32_term:
      break;
    case AMDGPU::SI_IF:
    case AMDGPU::SI_ELSE:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      // FIXME: It's messy that these need to be considered here at all.
      return true;
    default:
      llvm_unreachable("unexpected non-branch terminator inst");
    }

    ++I;
  }

  if (I == E)
    return false;

  return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
}
3248
                                   int *BytesRemoved) const {
  // Erase all branch/return terminators from MBB; report the number of
  // instructions removed (return value) and their total encoded size
  // (via BytesRemoved, when non-null).
  unsigned Count = 0;
  unsigned RemovedSize = 0;
  for (MachineInstr &MI : llvm::make_early_inc_range(MBB.terminators())) {
    // Skip over artificial terminators when removing instructions.
    if (MI.isBranch() || MI.isReturn()) {
      RemovedSize += getInstSizeInBytes(MI);
      MI.eraseFromParent();
      ++Count;
    }
  }

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;

  return Count;
}
3267
// Copy the flags onto the implicit condition register operand.
// Only the undef and kill flags of OrigCond are propagated onto CondReg.
                                 const MachineOperand &OrigCond) {
  CondReg.setIsUndef(OrigCond.isUndef());
  CondReg.setIsKill(OrigCond.isKill());
}
3274
                                   MachineBasicBlock *FBB,
                                   const DebugLoc &DL,
                                   int *BytesAdded) const {
  // Emit at most two branches: an optional conditional branch to TBB and an
  // optional unconditional branch to FBB. Each branch is 4 bytes, doubled to
  // 8 when ST.hasOffset3fBug() (extra padding is required on such targets).
  if (!FBB && Cond.empty()) {
    // Unconditional branch only.
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
      .addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
    return 1;
  }

  assert(TBB && Cond[0].isImm());

  unsigned Opcode
    = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (!FBB) {
    // Conditional branch only; the false edge falls through.
    MachineInstr *CondBr =
      BuildMI(&MBB, DL, get(Opcode))
      .addMBB(TBB);

    // Copy the flags onto the implicit condition register operand.
    preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
    fixImplicitOperands(*CondBr);

    if (BytesAdded)
      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
    return 1;
  }

  assert(TBB && FBB);

  // Conditional branch to TBB plus unconditional branch to FBB.
  MachineInstr *CondBr =
    BuildMI(&MBB, DL, get(Opcode))
    .addMBB(TBB);
  fixImplicitOperands(*CondBr);
  BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
    .addMBB(FBB);

  MachineOperand &CondReg = CondBr->getOperand(1);
  CondReg.setIsUndef(Cond[1].isUndef());
  CondReg.setIsKill(Cond[1].isKill());

  if (BytesAdded)
    *BytesAdded = ST.hasOffset3fBug() ? 16 : 8;

  return 2;
}
3326
  // Only the (predicate-imm, cond-reg) two-operand form is reversible; a
  // return of true means the condition could not be reversed.
  if (Cond.size() != 2) {
    return true;
  }

  if (Cond[0].isImm()) {
    // Negating the immediate flips the predicate — presumably BranchPredicate
    // enumerators are arranged in +/- pairs (e.g. SCC_TRUE = -SCC_FALSE);
    // confirm against the BranchPredicate definition in the header.
    Cond[0].setImm(-Cond[0].getImm());
    return false;
  }

  return true;
}
3340
                                Register DstReg, Register TrueReg,
                                Register FalseReg, int &CondCycles,
                                int &TrueCycles, int &FalseCycles) const {
  // Report whether a select on the given branch predicate can be lowered to
  // conditional-move sequences, and provide rough cycle-count estimates.
  switch (Cond[0].getImm()) {
  case VCCNZ:
  case VCCZ: {
    // VCC-based select: one v_cndmask_b32 per 32 bits; VGPR classes only.
    const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
    if (MRI.getRegClass(FalseReg) != RC)
      return false;

    int NumInsts = AMDGPU::getRegBitWidth(*RC) / 32;
    CondCycles = TrueCycles = FalseCycles = NumInsts; // ???

    // Limit to equal cost for branch vs. N v_cndmask_b32s.
    return RI.hasVGPRs(RC) && NumInsts <= 6;
  }
  case SCC_TRUE:
  case SCC_FALSE: {
    // FIXME: We could insert for VGPRs if we could replace the original compare
    // with a vector one.
    const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
    if (MRI.getRegClass(FalseReg) != RC)
      return false;

    int NumInsts = AMDGPU::getRegBitWidth(*RC) / 32;

    // Multiples of 8 can do s_cselect_b64
    if (NumInsts % 2 == 0)
      NumInsts /= 2;

    CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
    return RI.isSGPRClass(RC);
  }
  default:
    return false;
  }
}
3382
                                Register TrueReg, Register FalseReg) const {
  // Lower a select of TrueReg/FalseReg into DstReg on the given branch
  // predicate: s_cselect for SGPRs (SCC), v_cndmask for VGPRs (VCC), with
  // wide registers decomposed into 32-bit (or 64-bit SALU) pieces assembled
  // by a REG_SEQUENCE.
  BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
  if (Pred == VCCZ || Pred == SCC_FALSE) {
    // Normalize to the "true" predicate by swapping the select inputs.
    Pred = static_cast<BranchPredicate>(-Pred);
    std::swap(TrueReg, FalseReg);
  }

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
  unsigned DstSize = RI.getRegSizeInBits(*DstRC);

  if (DstSize == 32) {
    // NOTE(review): the declaration of `Select` appears to have been dropped
    // by source extraction here.
    if (Pred == SCC_TRUE) {
      Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg)
                   .addReg(TrueReg)
                   .addReg(FalseReg);
    } else {
      // Instruction's operands are backwards from what is expected.
      Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg)
                   .addReg(FalseReg)
                   .addReg(TrueReg);
    }

    preserveCondRegFlags(Select->getOperand(3), Cond[1]);
    return;
  }

  if (DstSize == 64 && Pred == SCC_TRUE) {
    // Whole 64-bit SALU select in one instruction.
    BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
        .addReg(TrueReg)
        .addReg(FalseReg);

    preserveCondRegFlags(Select->getOperand(3), Cond[1]);
    return;
  }

  // Subregister index tables for decomposing wide registers into 32-bit or
  // 64-bit pieces.
  static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
  };

  static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
  };

  unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
  const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
  const int16_t *SubIndices = Sub0_15;
  int NElts = DstSize / 32;

  // 64-bit select is only available for SALU.
  // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit.
  if (Pred == SCC_TRUE) {
    if (NElts % 2) {
      SelOp = AMDGPU::S_CSELECT_B32;
      EltRC = &AMDGPU::SGPR_32RegClass;
    } else {
      SelOp = AMDGPU::S_CSELECT_B64;
      EltRC = &AMDGPU::SGPR_64RegClass;
      SubIndices = Sub0_15_64;
      NElts /= 2;
    }
  }

  // NOTE(review): the start of the REG_SEQUENCE BuildMI (defining `MIB`) and
  // the declaration of `Regs` appear to have been dropped by extraction.
      MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);

  I = MIB->getIterator();

  // Emit one select per piece, then feed the pieces into the REG_SEQUENCE.
  for (int Idx = 0; Idx != NElts; ++Idx) {
    Register DstElt = MRI.createVirtualRegister(EltRC);
    Regs.push_back(DstElt);

    unsigned SubIdx = SubIndices[Idx];

    if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
      // v_cndmask takes its operands in (false, true) order.
      Select = BuildMI(MBB, I, DL, get(SelOp), DstElt)
                   .addReg(FalseReg, {}, SubIdx)
                   .addReg(TrueReg, {}, SubIdx);
    } else {
      Select = BuildMI(MBB, I, DL, get(SelOp), DstElt)
                   .addReg(TrueReg, {}, SubIdx)
                   .addReg(FalseReg, {}, SubIdx);
    }

    preserveCondRegFlags(Select->getOperand(3), Cond[1]);

    MIB.addReg(DstElt)
        .addImm(SubIdx);
  }
}
3487
  // A "foldable copy" is any mov/copy-like opcode that transfers a register
  // or materializes an immediate, making it a candidate for folding into its
  // users (see foldImmediate / SIFoldOperands).
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B16_t16_e32:
  case AMDGPU::V_MOV_B16_t16_e64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::COPY:
  case AMDGPU::WWM_COPY:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::V_ACCVGPR_READ_B32_e64:
  case AMDGPU::V_ACCVGPR_MOV_B32:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
    return true;
  default:
    return false;
  }
}
3512
  // Returns 2 for the 16-bit t16 movs and 1 for every other foldable copy.
  // NOTE(review): this function's signature line is outside this view;
  // presumably the value is the source-operand index (t16 movs carry an
  // extra modifiers operand before the source) — confirm against the header.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B16_t16_e32:
  case AMDGPU::V_MOV_B16_t16_e64:
    return 2;
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_IMM_PSEUDO:
  case AMDGPU::COPY:
  case AMDGPU::WWM_COPY:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::V_ACCVGPR_READ_B32_e64:
  case AMDGPU::V_ACCVGPR_MOV_B32:
  case AMDGPU::AV_MOV_B32_IMM_PSEUDO:
  case AMDGPU::AV_MOV_B64_IMM_PSEUDO:
    return 1;
  default:
    llvm_unreachable("MI is not a foldable copy");
  }
}
3538
// Named modifier operands (source modifiers, clamp, omod, op_sel) that
// removeModOperands() strips when an instruction is rewritten to a form
// without them.
static constexpr AMDGPU::OpName ModifierOpNames[] = {
    AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
    AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::clamp,
    AMDGPU::OpName::omod, AMDGPU::OpName::op_sel};
3543
  // Strip every ModifierOpNames operand MI actually has.
  unsigned Opc = MI.getOpcode();
  // Iterate in reverse — presumably so removing an operand does not shift
  // the indices of the not-yet-removed (earlier) modifier operands.
  for (AMDGPU::OpName Name : reverse(ModifierOpNames)) {
    int Idx = AMDGPU::getNamedOperandIdx(Opc, Name);
    if (Idx >= 0)
      MI.removeOperand(Idx);
  }
}
3552
                                          const MCInstrDesc &NewDesc) const {
  // Install the new descriptor, then drop operands beyond what the new
  // descriptor declares (explicit + implicit uses + implicit defs).
  MI.setDesc(NewDesc);

  // Remove any leftover implicit operands from mutating the instruction. e.g.
  // if we replace an s_and_b32 with a copy, we don't need the implicit scc def
  // anymore.
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() + Desc.implicit_uses().size() +
                    Desc.implicit_defs().size();

  // Trim from the back so the remaining indices stay stable.
  // NOTE(review): assumes NumOps >= 1; with NumOps == 0 and no operands the
  // unsigned loop index would wrap — confirm this cannot occur here.
  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.removeOperand(I);
}
3567
3568std::optional<int64_t> SIInstrInfo::extractSubregFromImm(int64_t Imm,
3569 unsigned SubRegIndex) {
3570 switch (SubRegIndex) {
3571 case AMDGPU::NoSubRegister:
3572 return Imm;
3573 case AMDGPU::sub0:
3574 return SignExtend64<32>(Imm);
3575 case AMDGPU::sub1:
3576 return SignExtend64<32>(Imm >> 32);
3577 case AMDGPU::lo16:
3578 return SignExtend64<16>(Imm);
3579 case AMDGPU::hi16:
3580 return SignExtend64<16>(Imm >> 16);
3581 case AMDGPU::sub1_lo16:
3582 return SignExtend64<16>(Imm >> 32);
3583 case AMDGPU::sub1_hi16:
3584 return SignExtend64<16>(Imm >> 48);
3585 default:
3586 return std::nullopt;
3587 }
3588
3589 llvm_unreachable("covered subregister switch");
3590}
3591
3592static unsigned getNewFMAAKInst(const GCNSubtarget &ST, unsigned Opc) {
3593 switch (Opc) {
3594 case AMDGPU::V_MAC_F16_e32:
3595 case AMDGPU::V_MAC_F16_e64:
3596 case AMDGPU::V_MAD_F16_e64:
3597 return AMDGPU::V_MADAK_F16;
3598 case AMDGPU::V_MAC_F32_e32:
3599 case AMDGPU::V_MAC_F32_e64:
3600 case AMDGPU::V_MAD_F32_e64:
3601 return AMDGPU::V_MADAK_F32;
3602 case AMDGPU::V_FMAC_F32_e32:
3603 case AMDGPU::V_FMAC_F32_e64:
3604 case AMDGPU::V_FMA_F32_e64:
3605 return AMDGPU::V_FMAAK_F32;
3606 case AMDGPU::V_FMAC_F16_e32:
3607 case AMDGPU::V_FMAC_F16_e64:
3608 case AMDGPU::V_FMAC_F16_t16_e64:
3609 case AMDGPU::V_FMAC_F16_fake16_e64:
3610 case AMDGPU::V_FMAC_F16_t16_e32:
3611 case AMDGPU::V_FMAC_F16_fake16_e32:
3612 case AMDGPU::V_FMA_F16_e64:
3613 return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
3614 ? AMDGPU::V_FMAAK_F16_t16
3615 : AMDGPU::V_FMAAK_F16_fake16
3616 : AMDGPU::V_FMAAK_F16;
3617 case AMDGPU::V_FMAC_F64_e32:
3618 case AMDGPU::V_FMAC_F64_e64:
3619 case AMDGPU::V_FMA_F64_e64:
3620 return AMDGPU::V_FMAAK_F64;
3621 default:
3622 llvm_unreachable("invalid instruction");
3623 }
3624}
3625
3626static unsigned getNewFMAMKInst(const GCNSubtarget &ST, unsigned Opc) {
3627 switch (Opc) {
3628 case AMDGPU::V_MAC_F16_e32:
3629 case AMDGPU::V_MAC_F16_e64:
3630 case AMDGPU::V_MAD_F16_e64:
3631 return AMDGPU::V_MADMK_F16;
3632 case AMDGPU::V_MAC_F32_e32:
3633 case AMDGPU::V_MAC_F32_e64:
3634 case AMDGPU::V_MAD_F32_e64:
3635 return AMDGPU::V_MADMK_F32;
3636 case AMDGPU::V_FMAC_F32_e32:
3637 case AMDGPU::V_FMAC_F32_e64:
3638 case AMDGPU::V_FMA_F32_e64:
3639 return AMDGPU::V_FMAMK_F32;
3640 case AMDGPU::V_FMAC_F16_e32:
3641 case AMDGPU::V_FMAC_F16_e64:
3642 case AMDGPU::V_FMAC_F16_t16_e64:
3643 case AMDGPU::V_FMAC_F16_fake16_e64:
3644 case AMDGPU::V_FMAC_F16_t16_e32:
3645 case AMDGPU::V_FMAC_F16_fake16_e32:
3646 case AMDGPU::V_FMA_F16_e64:
3647 return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
3648 ? AMDGPU::V_FMAMK_F16_t16
3649 : AMDGPU::V_FMAMK_F16_fake16
3650 : AMDGPU::V_FMAMK_F16;
3651 case AMDGPU::V_FMAC_F64_e32:
3652 case AMDGPU::V_FMAC_F64_e64:
3653 case AMDGPU::V_FMA_F64_e64:
3654 return AMDGPU::V_FMAMK_F64;
3655 default:
3656 llvm_unreachable("invalid instruction");
3657 }
3658}
3659
3661 Register Reg, MachineRegisterInfo *MRI) const {
3662 int64_t Imm;
3663 if (!getConstValDefinedInReg(DefMI, Reg, Imm))
3664 return false;
3665
3666 const bool HasMultipleUses = !MRI->hasOneNonDBGUse(Reg);
3667
3668 assert(!DefMI.getOperand(0).getSubReg() && "Expected SSA form");
3669
3670 unsigned Opc = UseMI.getOpcode();
3671 if (Opc == AMDGPU::COPY) {
3672 assert(!UseMI.getOperand(0).getSubReg() && "Expected SSA form");
3673
3674 Register DstReg = UseMI.getOperand(0).getReg();
3675 Register UseSubReg = UseMI.getOperand(1).getSubReg();
3676
3677 const TargetRegisterClass *DstRC = RI.getRegClassForReg(*MRI, DstReg);
3678
3679 if (HasMultipleUses) {
3680 // TODO: This should fold in more cases with multiple use, but we need to
3681 // more carefully consider what those uses are.
3682 unsigned ImmDefSize = RI.getRegSizeInBits(*MRI->getRegClass(Reg));
3683
3684 // Avoid breaking up a 64-bit inline immediate into a subregister extract.
3685 if (UseSubReg != AMDGPU::NoSubRegister && ImmDefSize == 64)
3686 return false;
3687
3688 // Most of the time folding a 32-bit inline constant is free (though this
3689 // might not be true if we can't later fold it into a real user).
3690 //
3691 // FIXME: This isInlineConstant check is imprecise if
3692 // getConstValDefinedInReg handled the tricky non-mov cases.
3693 if (ImmDefSize == 32 &&
3695 return false;
3696 }
3697
3698 bool Is16Bit = UseSubReg != AMDGPU::NoSubRegister &&
3699 RI.getSubRegIdxSize(UseSubReg) == 16;
3700
3701 if (Is16Bit) {
3702 if (RI.hasVGPRs(DstRC))
3703 return false; // Do not clobber vgpr_hi16
3704
3705 if (DstReg.isVirtual() && UseSubReg != AMDGPU::lo16)
3706 return false;
3707 }
3708
3709 MachineFunction *MF = UseMI.getMF();
3710
3711 unsigned NewOpc = AMDGPU::INSTRUCTION_LIST_END;
3712 MCRegister MovDstPhysReg =
3713 DstReg.isPhysical() ? DstReg.asMCReg() : MCRegister();
3714
3715 std::optional<int64_t> SubRegImm = extractSubregFromImm(Imm, UseSubReg);
3716
3717 // TODO: Try to fold with AMDGPU::V_MOV_B16_t16_e64
3718 for (unsigned MovOp :
3719 {AMDGPU::S_MOV_B32, AMDGPU::V_MOV_B32_e32, AMDGPU::S_MOV_B64,
3720 AMDGPU::V_MOV_B64_PSEUDO, AMDGPU::V_ACCVGPR_WRITE_B32_e64}) {
3721 const MCInstrDesc &MovDesc = get(MovOp);
3722
3723 const TargetRegisterClass *MovDstRC = getRegClass(MovDesc, 0);
3724 if (Is16Bit) {
3725 // We just need to find a correctly sized register class, so the
3726 // subregister index compatibility doesn't matter since we're statically
3727 // extracting the immediate value.
3728 MovDstRC = RI.getMatchingSuperRegClass(MovDstRC, DstRC, AMDGPU::lo16);
3729 if (!MovDstRC)
3730 continue;
3731
3732 if (MovDstPhysReg) {
3733 // FIXME: We probably should not do this. If there is a live value in
3734 // the high half of the register, it will be corrupted.
3735 MovDstPhysReg =
3736 RI.getMatchingSuperReg(MovDstPhysReg, AMDGPU::lo16, MovDstRC);
3737 if (!MovDstPhysReg)
3738 continue;
3739 }
3740 }
3741
3742 // Result class isn't the right size, try the next instruction.
3743 if (MovDstPhysReg) {
3744 if (!MovDstRC->contains(MovDstPhysReg))
3745 return false;
3746 } else if (!MRI->constrainRegClass(DstReg, MovDstRC)) {
3747 // TODO: This will be overly conservative in the case of 16-bit virtual
3748 // SGPRs. We could hack up the virtual register uses to use a compatible
3749 // 32-bit class.
3750 continue;
3751 }
3752
3753 const MCOperandInfo &OpInfo = MovDesc.operands()[1];
3754
3755 // Ensure the interpreted immediate value is a valid operand in the new
3756 // mov.
3757 //
3758 // FIXME: isImmOperandLegal should have form that doesn't require existing
3759 // MachineInstr or MachineOperand
3760 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType) &&
3761 !isInlineConstant(*SubRegImm, OpInfo.OperandType))
3762 break;
3763
3764 NewOpc = MovOp;
3765 break;
3766 }
3767
3768 if (NewOpc == AMDGPU::INSTRUCTION_LIST_END)
3769 return false;
3770
3771 if (Is16Bit) {
3772 UseMI.getOperand(0).setSubReg(AMDGPU::NoSubRegister);
3773 if (MovDstPhysReg)
3774 UseMI.getOperand(0).setReg(MovDstPhysReg);
3775 assert(UseMI.getOperand(1).getReg().isVirtual());
3776 }
3777
3778 const MCInstrDesc &NewMCID = get(NewOpc);
3779 UseMI.setDesc(NewMCID);
3780 UseMI.getOperand(1).ChangeToImmediate(*SubRegImm);
3781 UseMI.addImplicitDefUseOperands(*MF);
3782 return true;
3783 }
3784
3785 if (HasMultipleUses)
3786 return false;
3787
3788 if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
3789 Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
3790 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
3791 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64 ||
3792 Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
3793 Opc == AMDGPU::V_FMAC_F16_fake16_e64 || Opc == AMDGPU::V_FMA_F64_e64 ||
3794 Opc == AMDGPU::V_FMAC_F64_e64) {
3795 // Don't fold if we are using source or output modifiers. The new VOP2
3796 // instructions don't have them.
3798 return false;
3799
3800 // If this is a free constant, there's no reason to do this.
3801 // TODO: We could fold this here instead of letting SIFoldOperands do it
3802 // later.
3803 int Src0Idx = getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::src0);
3804
3805 // Any src operand can be used for the legality check.
3806 if (isInlineConstant(UseMI, Src0Idx, Imm))
3807 return false;
3808
3809 MachineOperand *Src0 = &UseMI.getOperand(Src0Idx);
3810
3811 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
3812 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
3813
3814 auto CopyRegOperandToNarrowerRC =
3815 [MRI, this](MachineInstr &MI, unsigned OpNo,
3816 const TargetRegisterClass *NewRC) -> void {
3817 if (!MI.getOperand(OpNo).isReg())
3818 return;
3819 Register Reg = MI.getOperand(OpNo).getReg();
3820 const TargetRegisterClass *RC = RI.getRegClassForReg(*MRI, Reg);
3821 if (RI.getCommonSubClass(RC, NewRC) != NewRC)
3822 return;
3823 Register Tmp = MRI->createVirtualRegister(NewRC);
3824 BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
3825 get(AMDGPU::COPY), Tmp)
3826 .addReg(Reg);
3827 MI.getOperand(OpNo).setReg(Tmp);
3828 MI.getOperand(OpNo).setIsKill();
3829 };
3830
3831 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
3832 if ((Src0->isReg() && Src0->getReg() == Reg) ||
3833 (Src1->isReg() && Src1->getReg() == Reg)) {
3834 MachineOperand *RegSrc =
3835 Src1->isReg() && Src1->getReg() == Reg ? Src0 : Src1;
3836 if (!RegSrc->isReg())
3837 return false;
3838 if (RI.isSGPRClass(MRI->getRegClass(RegSrc->getReg())) &&
3839 ST.getConstantBusLimit(Opc) < 2)
3840 return false;
3841
3842 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
3843 return false;
3844
3845 // If src2 is also a literal constant then we have to choose which one to
3846 // fold. In general it is better to choose madak so that the other literal
3847 // can be materialized in an sgpr instead of a vgpr:
3848 // s_mov_b32 s0, literal
3849 // v_madak_f32 v0, s0, v0, literal
3850 // Instead of:
3851 // v_mov_b32 v1, literal
3852 // v_madmk_f32 v0, v0, literal, v1
3853 MachineInstr *Def = MRI->getUniqueVRegDef(Src2->getReg());
3854 if (Def && Def->isMoveImmediate() &&
3855 !isInlineConstant(Def->getOperand(1)))
3856 return false;
3857
3858 unsigned NewOpc = getNewFMAMKInst(ST, Opc);
3859 if (pseudoToMCOpcode(NewOpc) == -1)
3860 return false;
3861
3862 const std::optional<int64_t> SubRegImm = extractSubregFromImm(
3863 Imm, RegSrc == Src1 ? Src0->getSubReg() : Src1->getSubReg());
3864
3865 // FIXME: This would be a lot easier if we could return a new instruction
3866 // instead of having to modify in place.
3867
3868 Register SrcReg = RegSrc->getReg();
3869 unsigned SrcSubReg = RegSrc->getSubReg();
3870 Src0->setReg(SrcReg);
3871 Src0->setSubReg(SrcSubReg);
3872 Src0->setIsKill(RegSrc->isKill());
3873
3874 if (Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
3875 Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
3876 Opc == AMDGPU::V_FMAC_F16_fake16_e64 ||
3877 Opc == AMDGPU::V_FMAC_F16_e64 || Opc == AMDGPU::V_FMAC_F64_e64)
3878 UseMI.untieRegOperand(
3879 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
3880
3881 Src1->ChangeToImmediate(*SubRegImm);
3882
3884 UseMI.setDesc(get(NewOpc));
3885
3886 if (NewOpc == AMDGPU::V_FMAMK_F16_t16 ||
3887 NewOpc == AMDGPU::V_FMAMK_F16_fake16) {
3888 const TargetRegisterClass *NewRC = getRegClass(get(NewOpc), 0);
3889 Register Tmp = MRI->createVirtualRegister(NewRC);
3890 BuildMI(*UseMI.getParent(), std::next(UseMI.getIterator()),
3891 UseMI.getDebugLoc(), get(AMDGPU::COPY),
3892 UseMI.getOperand(0).getReg())
3893 .addReg(Tmp, RegState::Kill);
3894 UseMI.getOperand(0).setReg(Tmp);
3895 CopyRegOperandToNarrowerRC(UseMI, 1, NewRC);
3896 CopyRegOperandToNarrowerRC(UseMI, 3, NewRC);
3897 }
3898
3899 bool DeleteDef = MRI->use_nodbg_empty(Reg);
3900 if (DeleteDef)
3901 DefMI.eraseFromParent();
3902
3903 return true;
3904 }
3905
3906 // Added part is the constant: Use v_madak_{f16, f32}.
3907 if (Src2->isReg() && Src2->getReg() == Reg) {
3908 if (ST.getConstantBusLimit(Opc) < 2) {
3909 // Not allowed to use constant bus for another operand.
3910 // We can however allow an inline immediate as src0.
3911 bool Src0Inlined = false;
3912 if (Src0->isReg()) {
3913 // Try to inline constant if possible.
3914 // If the Def moves immediate and the use is single
3915 // We are saving VGPR here.
3916 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
3917 if (Def && Def->isMoveImmediate() &&
3918 isInlineConstant(Def->getOperand(1)) &&
3919 MRI->hasOneNonDBGUse(Src0->getReg())) {
3920 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
3921 Src0Inlined = true;
3922 } else if (ST.getConstantBusLimit(Opc) <= 1 &&
3923 RI.isSGPRReg(*MRI, Src0->getReg())) {
3924 return false;
3925 }
3926 // VGPR is okay as Src0 - fallthrough
3927 }
3928
3929 if (Src1->isReg() && !Src0Inlined) {
3930 // We have one slot for inlinable constant so far - try to fill it
3931 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
3932 if (Def && Def->isMoveImmediate() &&
3933 isInlineConstant(Def->getOperand(1)) &&
3934 MRI->hasOneNonDBGUse(Src1->getReg()) && commuteInstruction(UseMI))
3935 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
3936 else if (RI.isSGPRReg(*MRI, Src1->getReg()))
3937 return false;
3938 // VGPR is okay as Src1 - fallthrough
3939 }
3940 }
3941
3942 unsigned NewOpc = getNewFMAAKInst(ST, Opc);
3943 if (pseudoToMCOpcode(NewOpc) == -1)
3944 return false;
3945
3946 // FIXME: This would be a lot easier if we could return a new instruction
3947 // instead of having to modify in place.
3948
3949 if (Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
3950 Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_t16_e64 ||
3951 Opc == AMDGPU::V_FMAC_F16_fake16_e64 ||
3952 Opc == AMDGPU::V_FMAC_F16_e64 || Opc == AMDGPU::V_FMAC_F64_e64)
3953 UseMI.untieRegOperand(
3954 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
3955
3956 const std::optional<int64_t> SubRegImm =
3957 extractSubregFromImm(Imm, Src2->getSubReg());
3958
3959 // ChangingToImmediate adds Src2 back to the instruction.
3960 Src2->ChangeToImmediate(*SubRegImm);
3961
3962 // These come before src2.
3964 UseMI.setDesc(get(NewOpc));
3965
3966 if (NewOpc == AMDGPU::V_FMAAK_F16_t16 ||
3967 NewOpc == AMDGPU::V_FMAAK_F16_fake16) {
3968 const TargetRegisterClass *NewRC = getRegClass(get(NewOpc), 0);
3969 Register Tmp = MRI->createVirtualRegister(NewRC);
3970 BuildMI(*UseMI.getParent(), std::next(UseMI.getIterator()),
3971 UseMI.getDebugLoc(), get(AMDGPU::COPY),
3972 UseMI.getOperand(0).getReg())
3973 .addReg(Tmp, RegState::Kill);
3974 UseMI.getOperand(0).setReg(Tmp);
3975 CopyRegOperandToNarrowerRC(UseMI, 1, NewRC);
3976 CopyRegOperandToNarrowerRC(UseMI, 2, NewRC);
3977 }
3978
3979 // It might happen that UseMI was commuted
3980 // and we now have SGPR as SRC1. If so 2 inlined
3981 // constant and SGPR are illegal.
3983
3984 bool DeleteDef = MRI->use_nodbg_empty(Reg);
3985 if (DeleteDef)
3986 DefMI.eraseFromParent();
3987
3988 return true;
3989 }
3990 }
3991
3992 return false;
3993}
3994
// Returns true if the two lists of memory base operands are element-wise
// identical (same length, each operand pair isIdenticalTo).
// NOTE(review): the parameter-list lines of this signature were dropped by
// this rendering — presumably two ArrayRef<const MachineOperand *> lists;
// confirm against the full source.
3995static bool
3998 if (BaseOps1.size() != BaseOps2.size())
3999 return false;
4000 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
4001 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))
4002 return false;
4003 }
4004 return true;
4005}
4006
4007static bool offsetsDoNotOverlap(LocationSize WidthA, int OffsetA,
4008 LocationSize WidthB, int OffsetB) {
4009 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
4010 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
4011 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
4012 return LowWidth.hasValue() &&
4013 LowOffset + (int)LowWidth.getValue() <= HighOffset;
4014}
4015
4016bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
4017 const MachineInstr &MIb) const {
4018 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
4019 int64_t Offset0, Offset1;
4020 LocationSize Dummy0 = LocationSize::precise(0);
4021 LocationSize Dummy1 = LocationSize::precise(0);
4022 bool Offset0IsScalable, Offset1IsScalable;
4023 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
4024 Dummy0, &RI) ||
4025 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable,
4026 Dummy1, &RI))
4027 return false;
4028
4029 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1))
4030 return false;
4031
4032 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
4033 // FIXME: Handle ds_read2 / ds_write2.
4034 return false;
4035 }
4036 LocationSize Width0 = MIa.memoperands().front()->getSize();
4037 LocationSize Width1 = MIb.memoperands().front()->getSize();
4038 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1);
4039}
4040
4042 const MachineInstr &MIb) const {
4043 assert(MIa.mayLoadOrStore() &&
4044 "MIa must load from or modify a memory location");
4045 assert(MIb.mayLoadOrStore() &&
4046 "MIb must load from or modify a memory location");
4047
4049 return false;
4050
4051 // XXX - Can we relax this between address spaces?
4052 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
4053 return false;
4054
4055 if (isLDSDMA(MIa) || isLDSDMA(MIb))
4056 return false;
4057
4058 if (MIa.isBundle() || MIb.isBundle())
4059 return false;
4060
4061 // TODO: Should we check the address space from the MachineMemOperand? That
4062 // would allow us to distinguish objects we know don't alias based on the
4063 // underlying address space, even if it was lowered to a different one,
4064 // e.g. private accesses lowered to use MUBUF instructions on a scratch
4065 // buffer.
4066 if (isDS(MIa)) {
4067 if (isDS(MIb))
4068 return checkInstOffsetsDoNotOverlap(MIa, MIb);
4069
4070 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
4071 }
4072
4073 if (isMUBUF(MIa) || isMTBUF(MIa)) {
4074 if (isMUBUF(MIb) || isMTBUF(MIb))
4075 return checkInstOffsetsDoNotOverlap(MIa, MIb);
4076
4077 if (isFLAT(MIb))
4078 return isFLATScratch(MIb);
4079
4080 return !isSMRD(MIb);
4081 }
4082
4083 if (isSMRD(MIa)) {
4084 if (isSMRD(MIb))
4085 return checkInstOffsetsDoNotOverlap(MIa, MIb);
4086
4087 if (isFLAT(MIb))
4088 return isFLATScratch(MIb);
4089
4090 return !isMUBUF(MIb) && !isMTBUF(MIb);
4091 }
4092
4093 if (isFLAT(MIa)) {
4094 if (isFLAT(MIb)) {
4095 if ((isFLATScratch(MIa) && isFLATGlobal(MIb)) ||
4096 (isFLATGlobal(MIa) && isFLATScratch(MIb)))
4097 return true;
4098
4099 return checkInstOffsetsDoNotOverlap(MIa, MIb);
4100 }
4101
4102 return false;
4103 }
4104
4105 return false;
4106}
4107
// If \p Reg is a virtual register whose unique definition is a foldable copy
// of an immediate, return that immediate in \p Imm (and the defining
// instruction in \p DefMI when requested).
// NOTE(review): the first signature line was dropped by this rendering —
// presumably "static bool getFoldableImm(Register Reg, const
// MachineRegisterInfo &MRI," — confirm against the full source.
 4109 int64_t &Imm, MachineInstr **DefMI = nullptr) {
 4110 if (Reg.isPhysical())
 4111 return false;
 4112 auto *Def = MRI.getUniqueVRegDef(Reg);
 4113 if (Def && SIInstrInfo::isFoldableCopy(*Def) && Def->getOperand(1).isImm()) {
 4114 Imm = Def->getOperand(1).getImm();
 4115 if (DefMI)
 4116 *DefMI = Def;
 4117 return true;
 4118 }
 4119 return false;
 4120}
4121
4122static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm,
4123 MachineInstr **DefMI = nullptr) {
4124 if (!MO->isReg())
4125 return false;
4126 const MachineFunction *MF = MO->getParent()->getMF();
4127 const MachineRegisterInfo &MRI = MF->getRegInfo();
4128 return getFoldableImm(MO->getReg(), MRI, Imm, DefMI);
4129}
4130
// Transfer kill-flag bookkeeping in LiveVariables from \p MI to \p NewMI for
// every register operand of \p MI that carries a kill flag. No-op when live
// variables are not being tracked.
// NOTE(review): the first signature line was dropped by this rendering —
// presumably "static void updateLiveVariables(LiveVariables *LV,
// MachineInstr &MI," — confirm against the full source.
 4132 MachineInstr &NewMI) {
 4133 if (LV) {
 4134 unsigned NumOps = MI.getNumOperands();
// Start at 1: operand 0 is the def, which has no kill flag to migrate.
 4135 for (unsigned I = 1; I < NumOps; ++I) {
 4136 MachineOperand &Op = MI.getOperand(I);
 4137 if (Op.isReg() && Op.isKill())
 4138 LV->replaceKillInstruction(Op.getReg(), MI, NewMI);
 4139 }
 4140 }
 4141}
4142
4143static unsigned getNewFMAInst(const GCNSubtarget &ST, unsigned Opc) {
4144 switch (Opc) {
4145 case AMDGPU::V_MAC_F16_e32:
4146 case AMDGPU::V_MAC_F16_e64:
4147 return AMDGPU::V_MAD_F16_e64;
4148 case AMDGPU::V_MAC_F32_e32:
4149 case AMDGPU::V_MAC_F32_e64:
4150 return AMDGPU::V_MAD_F32_e64;
4151 case AMDGPU::V_MAC_LEGACY_F32_e32:
4152 case AMDGPU::V_MAC_LEGACY_F32_e64:
4153 return AMDGPU::V_MAD_LEGACY_F32_e64;
4154 case AMDGPU::V_FMAC_LEGACY_F32_e32:
4155 case AMDGPU::V_FMAC_LEGACY_F32_e64:
4156 return AMDGPU::V_FMA_LEGACY_F32_e64;
4157 case AMDGPU::V_FMAC_F16_e32:
4158 case AMDGPU::V_FMAC_F16_e64:
4159 case AMDGPU::V_FMAC_F16_t16_e64:
4160 case AMDGPU::V_FMAC_F16_fake16_e64:
4161 return ST.hasTrue16BitInsts() ? ST.useRealTrue16Insts()
4162 ? AMDGPU::V_FMA_F16_gfx9_t16_e64
4163 : AMDGPU::V_FMA_F16_gfx9_fake16_e64
4164 : AMDGPU::V_FMA_F16_gfx9_e64;
4165 case AMDGPU::V_FMAC_F32_e32:
4166 case AMDGPU::V_FMAC_F32_e64:
4167 return AMDGPU::V_FMA_F32_e64;
4168 case AMDGPU::V_FMAC_F64_e32:
4169 case AMDGPU::V_FMAC_F64_e64:
4170 return AMDGPU::V_FMA_F64_e64;
4171 default:
4172 llvm_unreachable("invalid instruction");
4173 }
4174}
4175
4176/// Helper struct for the implementation of 3-address conversion to communicate
 4177/// updates made to instruction operands.
 4179 /// Other instruction whose def is no longer used by the converted
 4180 /// instruction.
// NOTE(review): the struct header (original line 4178) and the member
// declaration (original line 4181) were dropped by this rendering; the member
// is referenced elsewhere as "RemoveMIUse" — confirm against the full source.
 4182};
4183
// Convert a two-address instruction (or a single-instruction bundle wrapping
// one) to three-address form via convertToThreeAddressImpl, then repair
// LiveVariables/LiveIntervals bookkeeping and clean up an immediate-defining
// instruction whose value was folded away (U.RemoveMIUse).
// NOTE(review): the first signature line (original 4184) and the declaration
// of the local ThreeAddressUpdates "U" (original 4198) were dropped by this
// rendering — confirm against the full source.
 4185 LiveVariables *LV,
 4186 LiveIntervals *LIS) const {
 4187 MachineBasicBlock &MBB = *MI.getParent();
 4188 MachineInstr *CandidateMI = &MI;
 4189
 4190 if (MI.isBundle()) {
 4191 // This is a temporary placeholder for bundle handling that enables us to
 4192 // exercise the relevant code paths in the two-address instruction pass.
 4193 if (MI.getBundleSize() != 1)
 4194 return nullptr;
 4195 CandidateMI = MI.getNextNode();
 4196 }
 4197
 4199 MachineInstr *NewMI = convertToThreeAddressImpl(*CandidateMI, U);
 4200 if (!NewMI)
 4201 return nullptr;
 4202
 4203 if (MI.isBundle()) {
// The converted instruction replaces the bundled one; drop the old copy and
// untie any defs the bundle header still records as tied.
 4204 CandidateMI->eraseFromBundle();
 4205
 4206 for (MachineOperand &MO : MI.all_defs()) {
 4207 if (MO.isTied())
 4208 MI.untieRegOperand(MO.getOperandNo());
 4209 }
 4210 } else {
 4211 updateLiveVariables(LV, MI, *NewMI);
 4212 if (LIS) {
 4213 LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
 4214 // SlotIndex of defs needs to be updated when converting to early-clobber
 4215 MachineOperand &Def = NewMI->getOperand(0);
 4216 if (Def.isEarlyClobber() && Def.isReg() &&
 4217 LIS->hasInterval(Def.getReg())) {
 4218 SlotIndex OldIndex = LIS->getInstructionIndex(*NewMI).getRegSlot(false);
 4219 SlotIndex NewIndex = LIS->getInstructionIndex(*NewMI).getRegSlot(true);
 4220 auto &LI = LIS->getInterval(Def.getReg());
 4221 auto UpdateDefIndex = [&](LiveRange &LR) {
 4222 auto *S = LR.find(OldIndex);
 4223 if (S != LR.end() && S->start == OldIndex) {
 4224 assert(S->valno && S->valno->def == OldIndex);
 4225 S->start = NewIndex;
 4226 S->valno->def = NewIndex;
 4227 }
 4228 };
 4229 UpdateDefIndex(LI);
 4230 for (auto &SR : LI.subranges())
 4231 UpdateDefIndex(SR);
 4232 }
 4233 }
 4234 }
 4235
 4236 if (U.RemoveMIUse) {
 4237 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
 4238 // The only user is the instruction which will be killed.
 4239 Register DefReg = U.RemoveMIUse->getOperand(0).getReg();
 4240
 4241 if (MRI.hasOneNonDBGUse(DefReg)) {
 4242 // We cannot just remove the DefMI here, calling pass will crash.
 4243 U.RemoveMIUse->setDesc(get(AMDGPU::IMPLICIT_DEF));
 4244 U.RemoveMIUse->getOperand(0).setIsDead(true);
// Strip all operands except the (now dead) def, walking backwards so the
// operand indices stay valid while removing.
 4245 for (unsigned I = U.RemoveMIUse->getNumOperands() - 1; I != 0; --I)
 4246 U.RemoveMIUse->removeOperand(I);
 4247 if (LV)
 4248 LV->getVarInfo(DefReg).AliveBlocks.clear();
 4249 }
 4250
 4251 if (MI.isBundle()) {
 4252 VirtRegInfo VRI = AnalyzeVirtRegInBundle(MI, DefReg);
 4253 if (!VRI.Reads && !VRI.Writes) {
// The bundle no longer touches DefReg inside; remove the stale use from the
// bundle header as well.
 4254 for (MachineOperand &MO : MI.all_uses()) {
 4255 if (MO.isReg() && MO.getReg() == DefReg) {
 4256 assert(MO.getSubReg() == 0 &&
 4257 "tied sub-registers in bundles currently not supported");
 4258 MI.removeOperand(MO.getOperandNo());
 4259 break;
 4260 }
 4261 }
 4262
 4263 if (LIS)
 4264 LIS->shrinkToUses(&LIS->getInterval(DefReg));
 4265 }
 4266 } else if (LIS) {
 4267 LiveInterval &DefLI = LIS->getInterval(DefReg);
 4268
 4269 // We cannot delete the original instruction here, so hack out the use
 4270 // in the original instruction with a dummy register so we can use
 4271 // shrinkToUses to deal with any multi-use edge cases. Other targets do
 4272 // not have the complexity of deleting a use to consider here.
 4273 Register DummyReg = MRI.cloneVirtualRegister(DefReg);
 4274 for (MachineOperand &MIOp : MI.uses()) {
 4275 if (MIOp.isReg() && MIOp.getReg() == DefReg) {
 4276 MIOp.setIsUndef(true);
 4277 MIOp.setReg(DummyReg);
 4278 }
 4279 }
 4280
 4281 if (MI.isBundle()) {
 4282 VirtRegInfo VRI = AnalyzeVirtRegInBundle(MI, DefReg);
 4283 if (!VRI.Reads && !VRI.Writes) {
 4284 for (MachineOperand &MIOp : MI.uses()) {
 4285 if (MIOp.isReg() && MIOp.getReg() == DefReg) {
 4286 MIOp.setIsUndef(true);
 4287 MIOp.setReg(DummyReg);
 4288 }
 4289 }
 4290 }
 4291
 4292 MI.addOperand(MachineOperand::CreateReg(DummyReg, false, false, false,
 4293 false, /*isUndef=*/true));
 4294 }
 4295
 4296 LIS->shrinkToUses(&DefLI);
 4297 }
 4298 }
 4299
// For bundles the caller keeps operating on the bundle header.
 4300 return MI.isBundle() ? &MI : NewMI;
 4301}
4302
// Core of three-address conversion: handles MFMA (early-clobber variant),
// WMMA (2addr -> 3addr remap), and MAC/FMAC -> MAD/FMA / FMAAK / FMAMK,
// folding a foldable immediate operand into the AK/MK forms when legal.
// Returns the new instruction, or nullptr if no conversion applies.
// NOTE(review): the return-type line of the signature (original 4303), the
// MIB declaration before BuildMI (original 4312), and part of the condition
// at original 4429 were dropped by this rendering — confirm against the
// full source.
4304SIInstrInfo::convertToThreeAddressImpl(MachineInstr &MI,
 4305 ThreeAddressUpdates &U) const {
 4306 MachineBasicBlock &MBB = *MI.getParent();
 4307 unsigned Opc = MI.getOpcode();
 4308
 4309 // Handle MFMA.
 4310 int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(Opc);
 4311 if (NewMFMAOpc != -1) {
 4313 BuildMI(MBB, MI, MI.getDebugLoc(), get(NewMFMAOpc));
 4314 for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I)
 4315 MIB.add(MI.getOperand(I));
 4316 return MIB;
 4317 }
 4318
 4319 if (SIInstrInfo::isWMMA(MI)) {
 4320 unsigned NewOpc = AMDGPU::mapWMMA2AddrTo3AddrOpcode(MI.getOpcode());
 4321 MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
 4322 .setMIFlags(MI.getFlags());
 4323 for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I)
 4324 MIB->addOperand(MI.getOperand(I));
 4325 return MIB;
 4326 }
 4327
 4328 assert(Opc != AMDGPU::V_FMAC_F16_t16_e32 &&
 4329 Opc != AMDGPU::V_FMAC_F16_fake16_e32 &&
 4330 "V_FMAC_F16_t16/fake16_e32 is not supported and not expected to be "
 4331 "present pre-RA");
 4332
 4333 // Handle MAC/FMAC.
 4334 bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
 4335 bool IsLegacy = Opc == AMDGPU::V_MAC_LEGACY_F32_e32 ||
 4336 Opc == AMDGPU::V_MAC_LEGACY_F32_e64 ||
 4337 Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 ||
 4338 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64;
 4339 bool Src0Literal = false;
 4340
 4341 switch (Opc) {
 4342 default:
 4343 return nullptr;
 4344 case AMDGPU::V_MAC_F16_e64:
 4345 case AMDGPU::V_FMAC_F16_e64:
 4346 case AMDGPU::V_FMAC_F16_t16_e64:
 4347 case AMDGPU::V_FMAC_F16_fake16_e64:
 4348 case AMDGPU::V_MAC_F32_e64:
 4349 case AMDGPU::V_MAC_LEGACY_F32_e64:
 4350 case AMDGPU::V_FMAC_F32_e64:
 4351 case AMDGPU::V_FMAC_LEGACY_F32_e64:
 4352 case AMDGPU::V_FMAC_F64_e64:
 4353 break;
 4354 case AMDGPU::V_MAC_F16_e32:
 4355 case AMDGPU::V_FMAC_F16_e32:
 4356 case AMDGPU::V_MAC_F32_e32:
 4357 case AMDGPU::V_MAC_LEGACY_F32_e32:
 4358 case AMDGPU::V_FMAC_F32_e32:
 4359 case AMDGPU::V_FMAC_LEGACY_F32_e32:
 4360 case AMDGPU::V_FMAC_F64_e32: {
// e32 forms may carry a literal in src0; record that so the VOP3 conversion
// can bail out when VOP3 literals are unsupported.
 4361 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
 4362 AMDGPU::OpName::src0);
 4363 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
 4364 if (!Src0->isReg() && !Src0->isImm())
 4365 return nullptr;
 4366
 4367 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
 4368 Src0Literal = true;
 4369
 4370 break;
 4371 }
 4372 }
 4373
 4374 MachineInstrBuilder MIB;
 4375 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
 4376 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
 4377 const MachineOperand *Src0Mods =
 4378 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
 4379 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
 4380 const MachineOperand *Src1Mods =
 4381 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
 4382 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
 4383 const MachineOperand *Src2Mods =
 4384 getNamedOperand(MI, AMDGPU::OpName::src2_modifiers);
 4385 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
 4386 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
 4387 const MachineOperand *OpSel = getNamedOperand(MI, AMDGPU::OpName::op_sel);
 4388
// FMAAK/FMAMK have no modifier/clamp/omod fields, so only attempt the
// immediate-folding forms when none of those are in use.
 4389 if (!Src0Mods && !Src1Mods && !Src2Mods && !Clamp && !Omod && !IsLegacy &&
 4390 (!IsF64 || ST.hasFmaakFmamkF64Insts()) &&
 4391 // If we have an SGPR input, we will violate the constant bus restriction.
 4392 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||
 4393 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) {
 4394 MachineInstr *DefMI;
 4395
 4396 int64_t Imm;
// Addend (src2) is a foldable immediate: use the AK ("add constant") form.
 4397 if (!Src0Literal && getFoldableImm(Src2, Imm, &DefMI)) {
 4398 unsigned NewOpc = getNewFMAAKInst(ST, Opc);
 4399 if (pseudoToMCOpcode(NewOpc) != -1) {
 4400 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
 4401 .add(*Dst)
 4402 .add(*Src0)
 4403 .add(*Src1)
 4404 .addImm(Imm)
 4405 .setMIFlags(MI.getFlags());
 4406 U.RemoveMIUse = DefMI;
 4407 return MIB;
 4408 }
 4409 }
// Multiplicand is a foldable immediate: use the MK ("multiply constant") form.
 4410 unsigned NewOpc = getNewFMAMKInst(ST, Opc);
 4411 if (!Src0Literal && getFoldableImm(Src1, Imm, &DefMI)) {
 4412 if (pseudoToMCOpcode(NewOpc) != -1) {
 4413 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
 4414 .add(*Dst)
 4415 .add(*Src0)
 4416 .addImm(Imm)
 4417 .add(*Src2)
 4418 .setMIFlags(MI.getFlags());
 4419 U.RemoveMIUse = DefMI;
 4420 return MIB;
 4421 }
 4422 }
 4423 if (Src0Literal || getFoldableImm(Src0, Imm, &DefMI)) {
 4424 if (Src0Literal) {
 4425 Imm = Src0->getImm();
 4426 DefMI = nullptr;
 4427 }
 4428 if (pseudoToMCOpcode(NewOpc) != -1 &&
 4430 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),
 4431 Src1)) {
// Multiplication commutes, so src1 moves into src0 and the immediate takes
// the former src1 slot.
 4432 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
 4433 .add(*Dst)
 4434 .add(*Src1)
 4435 .addImm(Imm)
 4436 .add(*Src2)
 4437 .setMIFlags(MI.getFlags());
 4438 U.RemoveMIUse = DefMI;
 4439 return MIB;
 4440 }
 4441 }
 4442 }
 4443
 4444 // VOP2 mac/fmac with a literal operand cannot be converted to VOP3 mad/fma
 4445 // if VOP3 does not allow a literal operand.
 4446 if (Src0Literal && !ST.hasVOP3Literal())
 4447 return nullptr;
 4448
 4449 unsigned NewOpc = getNewFMAInst(ST, Opc);
 4450
 4451 if (pseudoToMCOpcode(NewOpc) == -1)
 4452 return nullptr;
 4453
 4454 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
 4455 .add(*Dst)
 4456 .addImm(Src0Mods ? Src0Mods->getImm() : 0)
 4457 .add(*Src0)
 4458 .addImm(Src1Mods ? Src1Mods->getImm() : 0)
 4459 .add(*Src1)
 4460 .addImm(Src2Mods ? Src2Mods->getImm() : 0)
 4461 .add(*Src2)
 4462 .addImm(Clamp ? Clamp->getImm() : 0)
 4463 .addImm(Omod ? Omod->getImm() : 0)
 4464 .setMIFlags(MI.getFlags());
 4465 if (AMDGPU::hasNamedOperand(NewOpc, AMDGPU::OpName::op_sel))
 4466 MIB.addImm(OpSel ? OpSel->getImm() : 0);
 4467 return MIB;
 4468}
4469
4470// It's not generally safe to move VALU instructions across these since it will
4471// start using the register as a base index rather than directly.
4472// XXX - Why isn't hasSideEffects sufficient for these?
// Returns true for the S_SET_GPR_IDX_* opcodes that change VGPR indexing mode.
// NOTE(review): the function signature line (original 4473) was dropped by
// this rendering — confirm against the full source.
 4474 switch (MI.getOpcode()) {
 4475 case AMDGPU::S_SET_GPR_IDX_ON:
 4476 case AMDGPU::S_SET_GPR_IDX_MODE:
 4477 case AMDGPU::S_SET_GPR_IDX_OFF:
 4478 return true;
 4479 default:
 4480 return false;
 4481 }
 4482}
4483
// Decide whether \p MI must act as a scheduling boundary: terminators,
// labels, INLINEASM_BR, full SCHED_BARRIERs, and instructions that modify
// EXEC or certain mode/priority registers cannot be scheduled across.
// NOTE(review): the first signature line (original 4484) and the final
// condition of the return expression (original 4513) were dropped by this
// rendering — confirm against the full source.
 4485 const MachineBasicBlock *MBB,
 4486 const MachineFunction &MF) const {
 4487 // Skipping the check for SP writes in the base implementation. The reason it
 4488 // was added was apparently due to compile time concerns.
 4489 //
 4490 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops
 4491 // but is probably avoidable.
 4492
 4493 // Copied from base implementation.
 4494 // Terminators and labels can't be scheduled around.
 4495 if (MI.isTerminator() || MI.isPosition())
 4496 return true;
 4497
 4498 // INLINEASM_BR can jump to another block
 4499 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
 4500 return true;
 4501
// Mask 0 means "no instruction may cross" — a full barrier.
 4502 if (MI.getOpcode() == AMDGPU::SCHED_BARRIER && MI.getOperand(0).getImm() == 0)
 4503 return true;
 4504
 4505 // Target-independent instructions do not have an implicit-use of EXEC, even
 4506 // when they operate on VGPRs. Treating EXEC modifications as scheduling
 4507 // boundaries prevents incorrect movements of such instructions.
 4508 return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
 4509 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
 4510 MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
 4511 MI.getOpcode() == AMDGPU::S_SETPRIO ||
 4512 MI.getOpcode() == AMDGPU::S_SETPRIO_INC_WG ||
 4514}
4515
// Returns true for opcodes that always operate on GDS (ordered count,
// GS-register DS ops, and GWS instructions).
// NOTE(review): the function signature line (original 4516) was dropped by
// this rendering — confirm against the full source.
 4517 return Opcode == AMDGPU::DS_ORDERED_COUNT ||
 4518 Opcode == AMDGPU::DS_ADD_GS_REG_RTN ||
 4519 Opcode == AMDGPU::DS_SUB_GS_REG_RTN || isGWS(Opcode);
 4520}
4521
// Conservatively decide whether \p MI may access the scratch (private)
// address space: only FLAT/BUF encodings can; FLAT-scratch always does;
// otherwise fall back to inspecting memory operands' address spaces.
// NOTE(review): the function signature line (original 4522) was dropped by
// this rendering — confirm against the full source.
 4523 // Instructions that access scratch use FLAT encoding or BUF encodings.
 4524 if ((!isFLAT(MI) || isFLATGlobal(MI)) && !isBUF(MI))
 4525 return false;
 4526
 4527 // SCRATCH instructions always access scratch.
 4528 if (isFLATScratch(MI))
 4529 return true;
 4530
 4531 // If FLAT_SCRATCH registers are not initialized, we can never access scratch
 4532 // via the aperture.
 4533 if (MI.getMF()->getFunction().hasFnAttribute("amdgpu-no-flat-scratch-init"))
 4534 return false;
 4535
 4536 // If there are no memory operands then conservatively assume the flat
 4537 // operation may access scratch.
 4538 if (MI.memoperands_empty())
 4539 return true;
 4540
 4541 // See if any memory operand specifies an address space that involves scratch.
 4542 return any_of(MI.memoperands(), [](const MachineMemOperand *Memop) {
 4543 unsigned AS = Memop->getAddrSpace();
 4544 if (AS == AMDGPUAS::FLAT_ADDRESS) {
// A generic flat access may still touch scratch unless noalias-addrspace
// metadata explicitly rules out the private address space.
 4545 const MDNode *MD = Memop->getAAInfo().NoAliasAddrSpace;
 4546 return !MD || !AMDGPU::hasValueInRangeLikeMetadata(
 4547 *MD, AMDGPUAS::PRIVATE_ADDRESS);
 4548 }
 4549 return AS == AMDGPUAS::PRIVATE_ADDRESS;
 4550 });
 4551}
4552
// Conservatively decide whether a FLAT instruction may access VMEM: true
// unless the instruction does not use the VMEM counter, or all of its memory
// operands are known to target only the LDS address space.
// NOTE(review): the function signature line (original 4553) was dropped by
// this rendering — confirm against the full source.
 4554 assert(isFLAT(MI));
 4555
 4556 // All flat instructions use the VMEM counter except prefetch.
 4557 if (!usesVM_CNT(MI))
 4558 return false;
 4559
 4560 // If there are no memory operands then conservatively assume the flat
 4561 // operation may access VMEM.
 4562 if (MI.memoperands_empty())
 4563 return true;
 4564
 4565 // See if any memory operand specifies an address space that involves VMEM.
 4566 // Flat operations only supported FLAT, LOCAL (LDS), or address spaces
 4567 // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION
 4568 // (GDS) address space is not supported by flat operations. Therefore, simply
 4569 // return true unless only the LDS address space is found.
 4570 for (const MachineMemOperand *Memop : MI.memoperands()) {
 4571 unsigned AS = Memop->getAddrSpace();
 4573 if (AS != AMDGPUAS::LOCAL_ADDRESS)
 4574 return true;
 4575 }
 4576
 4577 return false;
 4578}
4579
// Conservatively decide whether a FLAT instruction may access LDS: false when
// the instruction does not use the LGKM counter or tgsplit mode is enabled;
// otherwise true unless memory operands rule LDS out.
// NOTE(review): the function signature line (original 4580) and the address-
// space condition inside the loop (original 4599) were dropped by this
// rendering — confirm against the full source.
 4581 assert(isFLAT(MI));
 4582
 4583 // Flat instruction such as SCRATCH and GLOBAL do not use the lgkm counter.
 4584 if (!usesLGKM_CNT(MI))
 4585 return false;
 4586
 4587 // If in tgsplit mode then there can be no use of LDS.
 4588 if (ST.isTgSplitEnabled())
 4589 return false;
 4590
 4591 // If there are no memory operands then conservatively assume the flat
 4592 // operation may access LDS.
 4593 if (MI.memoperands_empty())
 4594 return true;
 4595
 4596 // See if any memory operand specifies an address space that involves LDS.
 4597 for (const MachineMemOperand *Memop : MI.memoperands()) {
 4598 unsigned AS = Memop->getAddrSpace();
 4600 return true;
 4601 }
 4602
 4603 return false;
 4604}
4605
// Returns true if the instruction's descriptor lists MODE among its implicit
// defs, i.e. it writes the MODE register.
// NOTE(review): the function signature line (original 4606) was dropped by
// this rendering — confirm against the full source.
 4607 // Skip the full operand and register alias search modifiesRegister
 4608 // does. There's only a handful of instructions that touch this, it's only an
 4609 // implicit def, and doesn't alias any other registers.
 4610 return is_contained(MI.getDesc().implicit_defs(), AMDGPU::MODE);
 4611}
4612
// Returns true if executing \p MI with EXEC = 0 would still have observable
// or dangerous effects (scalar stores, returns, message/export I/O, calls,
// inline asm, barriers, mode changes, and lane-access instructions).
// NOTE(review): the function signature line (original 4613) was dropped by
// this rendering — confirm against the full source.
 4614 unsigned Opcode = MI.getOpcode();
 4615
 4616 if (MI.mayStore() && isSMRD(MI))
 4617 return true; // scalar store or atomic
 4618
 4619 // This will terminate the function when other lanes may need to continue.
 4620 if (MI.isReturn())
 4621 return true;
 4622
 4623 // These instructions cause shader I/O that may cause hardware lockups
 4624 // when executed with an empty EXEC mask.
 4625 //
 4626 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when
 4627 // EXEC = 0, but checking for that case here seems not worth it
 4628 // given the typical code patterns.
 4629 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
 4630 isEXP(Opcode) || Opcode == AMDGPU::DS_ORDERED_COUNT ||
 4631 Opcode == AMDGPU::S_TRAP || Opcode == AMDGPU::S_WAIT_EVENT)
 4632 return true;
 4633
 4634 if (MI.isCall() || MI.isInlineAsm())
 4635 return true; // conservative assumption
 4636
 4637 // Assume that barrier interactions are only intended with active lanes.
 4638 if (isBarrier(Opcode))
 4639 return true;
 4640
 4641 // A mode change is a scalar operation that influences vector instructions.
 4643 return true;
 4644
 4645 // These are like SALU instructions in terms of effects, so it's questionable
 4646 // whether we should return true for those.
 4647 //
 4648 // However, executing them with EXEC = 0 causes them to operate on undefined
 4649 // data, which we avoid by returning true here.
 4650 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 ||
 4651 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32 ||
 4652 Opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR ||
 4653 Opcode == AMDGPU::SI_SPILL_S32_TO_VGPR)
 4654 return true;
 4655
 4656 return false;
 4657}
4658
// Conservatively decide whether \p MI reads the EXEC mask: meta instructions
// never do; SGPR-to-SGPR copies only if EXEC is an explicit operand; calls,
// generic opcodes, and anything that is not SALU are assumed to.
// NOTE(review): the first signature line (original 4659) was dropped by this
// rendering — confirm against the full source.
 4660 const MachineInstr &MI) const {
 4661 if (MI.isMetaInstruction())
 4662 return false;
 4663
 4664 // This won't read exec if this is an SGPR->SGPR copy.
 4665 if (MI.isCopyLike()) {
 4666 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))
 4667 return true;
 4668
 4669 // Make sure this isn't copying exec as a normal operand
 4670 return MI.readsRegister(AMDGPU::EXEC, &RI);
 4671 }
 4672
 4673 // Make a conservative assumption about the callee.
 4674 if (MI.isCall())
 4675 return true;
 4676
 4677 // Be conservative with any unhandled generic opcodes.
 4678 if (!isTargetSpecificOpcode(MI.getOpcode()))
 4679 return true;
 4680
 4681 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
 4682}
4683
4684bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
4685 switch (Imm.getBitWidth()) {
4686 case 1: // This likely will be a condition code mask.
4687 return true;
4688
4689 case 32:
4690 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
4691 ST.hasInv2PiInlineImm());
4692 case 64:
4693 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
4694 ST.hasInv2PiInlineImm());
4695 case 16:
4696 return ST.has16BitInsts() &&
4697 AMDGPU::isInlinableLiteralI16(Imm.getSExtValue(),
4698 ST.hasInv2PiInlineImm());
4699 default:
4700 llvm_unreachable("invalid bitwidth");
4701 }
4702}
4703
// Float overload: check whether the bit pattern of \p Imm is an inline
// constant, dispatching on its floating-point semantics.
// NOTE(review): the signature line (original 4704) and the case labels for
// the semantics switch (originals 4711-4712, 4714, 4717) were dropped by this
// rendering — confirm against the full source.
 4705 APInt IntImm = Imm.bitcastToAPInt();
 4706 int64_t IntImmVal = IntImm.getSExtValue();
 4707 bool HasInv2Pi = ST.hasInv2PiInlineImm();
 4708 switch (APFloat::SemanticsToEnum(Imm.getSemantics())) {
 4709 default:
 4710 llvm_unreachable("invalid fltSemantics");
 4713 return isInlineConstant(IntImm);
 4715 return ST.has16BitInsts() &&
 4716 AMDGPU::isInlinableLiteralBF16(IntImmVal, HasInv2Pi);
 4718 return ST.has16BitInsts() &&
 4719 AMDGPU::isInlinableLiteralFP16(IntImmVal, HasInv2Pi);
 4720 }
 4721}
4722
// Decide whether the raw 64-bit immediate \p Imm is an inline constant for an
// operand of type \p OperandType, truncating to the operand's true width
// before testing.
// NOTE(review): the OPERAND_* case labels of this switch (original lines
// 4729-4737, 4741-4745, 4747-4748, and several later label runs) were dropped
// by this rendering — confirm against the full source.
4723bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
 4724 // MachineOperand provides no way to tell the true operand size, since it only
 4725 // records a 64-bit value. We need to know the size to determine if a 32-bit
 4726 // floating point immediate bit pattern is legal for an integer immediate. It
 4727 // would be for any 32-bit integer operand, but would not be for a 64-bit one.
 4728 switch (OperandType) {
 4738 int32_t Trunc = static_cast<int32_t>(Imm);
 4739 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
 4740 }
 4746 return AMDGPU::isInlinableLiteral64(Imm, ST.hasInv2PiInlineImm());
 4749 // We would expect inline immediates to not be concerned with an integer/fp
 4750 // distinction. However, in the case of 16-bit integer operations, the
 4751 // "floating point" values appear to not work. It seems read the low 16-bits
 4752 // of 32-bit immediates, which happens to always work for the integer
 4753 // values.
 4754 //
 4755 // See llvm bugzilla 46302.
 4756 //
 4757 // TODO: Theoretically we could use op-sel to use the high bits of the
 4758 // 32-bit FP values.
 4767 return AMDGPU::isPKFMACF16InlineConstant(Imm, ST.isGFX11Plus());
 4772 return false;
 4775 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
 4776 // A few special case instructions have 16-bit operands on subtargets
 4777 // where 16-bit instructions are not legal.
 4778 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle
 4779 // constants in these cases
 4780 int16_t Trunc = static_cast<int16_t>(Imm);
 4781 return ST.has16BitInsts() &&
 4782 AMDGPU::isInlinableLiteralFP16(Trunc, ST.hasInv2PiInlineImm());
 4783 }
 4784
 4785 return false;
 4786 }
 4789 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
 4790 int16_t Trunc = static_cast<int16_t>(Imm);
 4791 return ST.has16BitInsts() &&
 4792 AMDGPU::isInlinableLiteralBF16(Trunc, ST.hasInv2PiInlineImm());
 4793 }
 4794 return false;
 4795 }
 4799 return false;
 4801 return isLegalAV64PseudoImm(Imm);
 4804 // Always embedded in the instruction for free.
 4805 return true;
 4815 // Just ignore anything else.
 4816 return true;
 4817 default:
 4818 llvm_unreachable("invalid operand type");
 4819 }
4820}
4821
// Returns true if two machine operands are of the same type and hold the same
// register or the same immediate value.
// NOTE(review): the case labels of this switch (original lines 4828 and 4830)
// were dropped by this rendering — confirm against the full source.
4822static bool compareMachineOp(const MachineOperand &Op0,
 4823 const MachineOperand &Op1) {
 4824 if (Op0.getType() != Op1.getType())
 4825 return false;
 4826
 4827 switch (Op0.getType()) {
 4829 return Op0.getReg() == Op1.getReg();
 4831 return Op0.getImm() == Op1.getImm();
 4832 default:
 4833 llvm_unreachable("Didn't expect to be comparing these operand types");
 4834 }
4835}
4836
// Decide whether a (non-inline) literal constant may legally occupy the
// operand described by \p OpInfo, accounting for VOP3 literal support.
// NOTE(review): the first signature line (original 4837) was dropped by this
// rendering — confirm against the full source.
 4838 const MCOperandInfo &OpInfo) const {
 4839 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
 4840 return true;
 4841
 4842 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
 4843 return false;
 4844
// Non-VOP3 (or non-src) operands that can take literals need no further check.
 4845 if (!isVOP3(InstDesc) || !AMDGPU::isSISrcOperand(OpInfo))
 4846 return true;
 4847
 4848 return ST.hasVOP3Literal();
 4849}
4850
4851bool SIInstrInfo::isImmOperandLegal(const MCInstrDesc &InstDesc, unsigned OpNo,
4852 int64_t ImmVal) const {
4853 const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
4854 if (isInlineConstant(ImmVal, OpInfo.OperandType)) {
4855 if (isMAI(InstDesc) && ST.hasMFMAInlineLiteralBug() &&
4856 OpNo == (unsigned)AMDGPU::getNamedOperandIdx(InstDesc.getOpcode(),
4857 AMDGPU::OpName::src2))
4858 return false;
4859 return RI.opCanUseInlineConstant(OpInfo.OperandType);
4860 }
4861
4862 return isLiteralOperandLegal(InstDesc, OpInfo);
4863}
4864
4865bool SIInstrInfo::isImmOperandLegal(const MCInstrDesc &InstDesc, unsigned OpNo,
4866 const MachineOperand &MO) const {
4867 if (MO.isImm())
4868 return isImmOperandLegal(InstDesc, OpNo, MO.getImm());
4869
4870 assert((MO.isTargetIndex() || MO.isFI() || MO.isGlobal()) &&
4871 "unexpected imm-like operand kind");
4872 const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
4873 return isLiteralOperandLegal(InstDesc, OpInfo);
4874}
4875
4877 // 2 32-bit inline constants packed into one.
4878 return AMDGPU::isInlinableLiteral32(Lo_32(Imm), ST.hasInv2PiInlineImm()) &&
4879 AMDGPU::isInlinableLiteral32(Hi_32(Imm), ST.hasInv2PiInlineImm());
4880}
4881
4882bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
4883 // GFX90A does not have V_MUL_LEGACY_F32_e32.
4884 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())
4885 return false;
4886
4887 int Op32 = AMDGPU::getVOPe32(Opcode);
4888 if (Op32 == -1)
4889 return false;
4890
4891 return pseudoToMCOpcode(Op32) != -1;
4892}
4893
4894bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
4895 // The src0_modifier operand is present on all instructions
4896 // that have modifiers.
4897
4898 return AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0_modifiers);
4899}
4900
4902 AMDGPU::OpName OpName) const {
4903 const MachineOperand *Mods = getNamedOperand(MI, OpName);
4904 return Mods && Mods->getImm();
4905}
4906
4908 return any_of(ModifierOpNames,
4909 [&](AMDGPU::OpName Name) { return hasModifiersSet(MI, Name); });
4910}
4911
4913 const MachineRegisterInfo &MRI) const {
4914 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
4915 // Can't shrink instruction with three operands.
4916 if (Src2) {
4917 switch (MI.getOpcode()) {
4918 default: return false;
4919
4920 case AMDGPU::V_ADDC_U32_e64:
4921 case AMDGPU::V_SUBB_U32_e64:
4922 case AMDGPU::V_SUBBREV_U32_e64: {
4923 const MachineOperand *Src1
4924 = getNamedOperand(MI, AMDGPU::OpName::src1);
4925 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
4926 return false;
4927 // Additional verification is needed for sdst/src2.
4928 return true;
4929 }
4930 case AMDGPU::V_MAC_F16_e64:
4931 case AMDGPU::V_MAC_F32_e64:
4932 case AMDGPU::V_MAC_LEGACY_F32_e64:
4933 case AMDGPU::V_FMAC_F16_e64:
4934 case AMDGPU::V_FMAC_F16_t16_e64:
4935 case AMDGPU::V_FMAC_F16_fake16_e64:
4936 case AMDGPU::V_FMAC_F32_e64:
4937 case AMDGPU::V_FMAC_F64_e64:
4938 case AMDGPU::V_FMAC_LEGACY_F32_e64:
4939 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
4940 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
4941 return false;
4942 break;
4943
4944 case AMDGPU::V_CNDMASK_B32_e64:
4945 break;
4946 }
4947 }
4948
4949 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
4950 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
4951 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
4952 return false;
4953
4954 // We don't need to check src0, all input types are legal, so just make sure
4955 // src0 isn't using any modifiers.
4956 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
4957 return false;
4958
4959 // Can it be shrunk to a valid 32 bit opcode?
4960 if (!hasVALU32BitEncoding(MI.getOpcode()))
4961 return false;
4962
4963 // Check output modifiers
4964 return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
4965 !hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
4966 !hasModifiersSet(MI, AMDGPU::OpName::byte_sel) &&
4967 // TODO: Can we avoid checking bound_ctrl/fi here?
4968 // They are only used by permlane*_swap special case.
4969 !hasModifiersSet(MI, AMDGPU::OpName::bound_ctrl) &&
4970 !hasModifiersSet(MI, AMDGPU::OpName::fi);
4971}
4972
4973// Set VCC operand with all flags from \p Orig, except for setting it as
4974// implicit.
4976 const MachineOperand &Orig) {
4977
4978 for (MachineOperand &Use : MI.implicit_operands()) {
4979 if (Use.isUse() &&
4980 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
4981 Use.setIsUndef(Orig.isUndef());
4982 Use.setIsKill(Orig.isKill());
4983 return;
4984 }
4985 }
4986}
4987
4989 unsigned Op32) const {
4990 MachineBasicBlock *MBB = MI.getParent();
4991
4992 const MCInstrDesc &Op32Desc = get(Op32);
4993 MachineInstrBuilder Inst32 =
4994 BuildMI(*MBB, MI, MI.getDebugLoc(), Op32Desc)
4995 .setMIFlags(MI.getFlags());
4996
4997 // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
4998 // For VOPC instructions, this is replaced by an implicit def of vcc.
4999
5000 // We assume the defs of the shrunk opcode are in the same order, and the
5001 // shrunk opcode loses the last def (SGPR def, in the VOP3->VOPC case).
5002 for (int I = 0, E = Op32Desc.getNumDefs(); I != E; ++I)
5003 Inst32.add(MI.getOperand(I));
5004
5005 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
5006
5007 int Idx = MI.getNumExplicitDefs();
5008 for (const MachineOperand &Use : MI.explicit_uses()) {
5009 int OpTy = MI.getDesc().operands()[Idx++].OperandType;
5011 continue;
5012
5013 if (&Use == Src2) {
5014 if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2) == -1) {
5015 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
5016 // replaced with an implicit read of vcc or vcc_lo. The implicit read
5017 // of vcc was already added during the initial BuildMI, but we
5018 // 1) may need to change vcc to vcc_lo to preserve the original register
5019 // 2) have to preserve the original flags.
5020 copyFlagsToImplicitVCC(*Inst32, *Src2);
5021 continue;
5022 }
5023 }
5024
5025 Inst32.add(Use);
5026 }
5027
5028 // FIXME: Losing implicit operands
5029 fixImplicitOperands(*Inst32);
5030 return Inst32;
5031}
5032
5034 // Null is free
5035 Register Reg = RegOp.getReg();
5036 if (Reg == AMDGPU::SGPR_NULL || Reg == AMDGPU::SGPR_NULL64)
5037 return false;
5038
5039 // SGPRs use the constant bus
5040
5041 // FIXME: implicit registers that are not part of the MCInstrDesc's implicit
5042 // physical register operands should also count, except for exec.
5043 if (RegOp.isImplicit())
5044 return Reg == AMDGPU::VCC || Reg == AMDGPU::VCC_LO || Reg == AMDGPU::M0;
5045
5046 // SGPRs use the constant bus
5047 return AMDGPU::SReg_32RegClass.contains(Reg) ||
5048 AMDGPU::SReg_64RegClass.contains(Reg);
5049}
5050
5052 const MachineRegisterInfo &MRI) const {
5053 Register Reg = RegOp.getReg();
5054 return Reg.isVirtual() ? RI.isSGPRClass(MRI.getRegClass(Reg))
5055 : physRegUsesConstantBus(RegOp);
5056}
5057
5059 const MachineOperand &MO,
5060 const MCOperandInfo &OpInfo) const {
5061 // Literal constants use the constant bus.
5062 if (!MO.isReg())
5063 return !isInlineConstant(MO, OpInfo);
5064
5065 Register Reg = MO.getReg();
5066 return Reg.isVirtual() ? RI.isSGPRClass(MRI.getRegClass(Reg))
5068}
5069
5071 for (const MachineOperand &MO : MI.implicit_operands()) {
5072 // We only care about reads.
5073 if (MO.isDef())
5074 continue;
5075
5076 switch (MO.getReg()) {
5077 case AMDGPU::VCC:
5078 case AMDGPU::VCC_LO:
5079 case AMDGPU::VCC_HI:
5080 case AMDGPU::M0:
5081 case AMDGPU::FLAT_SCR:
5082 return MO.getReg();
5083
5084 default:
5085 break;
5086 }
5087 }
5088
5089 return Register();
5090}
5091
5092static bool shouldReadExec(const MachineInstr &MI) {
5093 if (SIInstrInfo::isVALU(MI)) {
5094 switch (MI.getOpcode()) {
5095 case AMDGPU::V_READLANE_B32:
5096 case AMDGPU::SI_RESTORE_S32_FROM_VGPR:
5097 case AMDGPU::V_WRITELANE_B32:
5098 case AMDGPU::SI_SPILL_S32_TO_VGPR:
5099 return false;
5100 }
5101
5102 return true;
5103 }
5104
5105 if (MI.isPreISelOpcode() ||
5106 SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
5109 return false;
5110
5111 return true;
5112}
5113
5114static bool isRegOrFI(const MachineOperand &MO) {
5115 return MO.isReg() || MO.isFI();
5116}
5117
5118static bool isSubRegOf(const SIRegisterInfo &TRI,
5119 const MachineOperand &SuperVec,
5120 const MachineOperand &SubReg) {
5121 if (SubReg.getReg().isPhysical())
5122 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
5123
5124 return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
5125 SubReg.getReg() == SuperVec.getReg();
5126}
5127
5128// Verify the illegal copy from vector register to SGPR for generic opcode COPY
5129bool SIInstrInfo::verifyCopy(const MachineInstr &MI,
5130 const MachineRegisterInfo &MRI,
5131 StringRef &ErrInfo) const {
5132 Register DstReg = MI.getOperand(0).getReg();
5133 Register SrcReg = MI.getOperand(1).getReg();
5134 // This is a check for copy from vector register to SGPR
5135 if (RI.isVectorRegister(MRI, SrcReg) && RI.isSGPRReg(MRI, DstReg)) {
5136 ErrInfo = "illegal copy from vector register to SGPR";
5137 return false;
5138 }
5139 return true;
5140}
5141
5143 StringRef &ErrInfo) const {
5144 uint32_t Opcode = MI.getOpcode();
5145 const MachineFunction *MF = MI.getMF();
5146 const MachineRegisterInfo &MRI = MF->getRegInfo();
5147
5148 // FIXME: At this point the COPY verify is done only for non-ssa forms.
5149 // Find a better property to recognize the point where instruction selection
5150 // is just done.
5151 // We can only enforce this check after SIFixSGPRCopies pass so that the
5152 // illegal copies are legalized and thereafter we don't expect a pass
5153 // inserting similar copies.
5154 if (!MRI.isSSA() && MI.isCopy())
5155 return verifyCopy(MI, MRI, ErrInfo);
5156
5157 if (SIInstrInfo::isGenericOpcode(Opcode))
5158 return true;
5159
5160 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
5161 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
5162 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
5163 int Src3Idx = -1;
5164 if (Src0Idx == -1) {
5165 // VOPD V_DUAL_* instructions use different operand names.
5166 Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0X);
5167 Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1X);
5168 Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0Y);
5169 Src3Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1Y);
5170 }
5171
5172 // Make sure the number of operands is correct.
5173 const MCInstrDesc &Desc = get(Opcode);
5174 if (!Desc.isVariadic() &&
5175 Desc.getNumOperands() != MI.getNumExplicitOperands()) {
5176 ErrInfo = "Instruction has wrong number of operands.";
5177 return false;
5178 }
5179
5180 if (MI.isInlineAsm()) {
5181 // Verify register classes for inlineasm constraints.
5182 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
5183 I != E; ++I) {
5184 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
5185 if (!RC)
5186 continue;
5187
5188 const MachineOperand &Op = MI.getOperand(I);
5189 if (!Op.isReg())
5190 continue;
5191
5192 Register Reg = Op.getReg();
5193 if (!Reg.isVirtual() && !RC->contains(Reg)) {
5194 ErrInfo = "inlineasm operand has incorrect register class.";
5195 return false;
5196 }
5197 }
5198
5199 return true;
5200 }
5201
5202 if (isImage(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) {
5203 ErrInfo = "missing memory operand from image instruction.";
5204 return false;
5205 }
5206
5207 // Make sure the register classes are correct.
5208 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
5209 const MachineOperand &MO = MI.getOperand(i);
5210 if (MO.isFPImm()) {
5211 ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
5212 "all fp values to integers.";
5213 return false;
5214 }
5215
5216 const MCOperandInfo &OpInfo = Desc.operands()[i];
5217 int16_t RegClass = getOpRegClassID(OpInfo);
5218
5219 switch (OpInfo.OperandType) {
5221 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
5222 ErrInfo = "Illegal immediate value for operand.";
5223 return false;
5224 }
5225 break;
5239 break;
5241 break;
5242 break;
5256 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
5257 ErrInfo = "Illegal immediate value for operand.";
5258 return false;
5259 }
5260 break;
5261 }
5263 if (!MI.getOperand(i).isImm() || !isInlineConstant(MI, i)) {
5264 ErrInfo = "Expected inline constant for operand.";
5265 return false;
5266 }
5267 break;
5271 break;
5276 // Check if this operand is an immediate.
5277 // FrameIndex operands will be replaced by immediates, so they are
5278 // allowed.
5279 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
5280 ErrInfo = "Expected immediate, but got non-immediate";
5281 return false;
5282 }
5283 break;
5287 break;
5288 default:
5289 if (OpInfo.isGenericType())
5290 continue;
5291 break;
5292 }
5293
5294 if (!MO.isReg())
5295 continue;
5296 Register Reg = MO.getReg();
5297 if (!Reg)
5298 continue;
5299
5300 // FIXME: Ideally we would have separate instruction definitions with the
5301 // aligned register constraint.
5302 // FIXME: We do not verify inline asm operands, but custom inline asm
5303 // verification is broken anyway
5304 if (ST.needsAlignedVGPRs() && Opcode != AMDGPU::AV_MOV_B64_IMM_PSEUDO &&
5305 Opcode != AMDGPU::V_MOV_B64_PSEUDO && !isSpill(MI)) {
5306 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg);
5307 if (RI.hasVectorRegisters(RC) && MO.getSubReg()) {
5308 if (const TargetRegisterClass *SubRC =
5309 RI.getSubRegisterClass(RC, MO.getSubReg())) {
5310 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg());
5311 if (RC)
5312 RC = SubRC;
5313 }
5314 }
5315
5316 // Check that this is the aligned version of the class.
5317 if (!RC || !RI.isProperlyAlignedRC(*RC)) {
5318 ErrInfo = "Subtarget requires even aligned vector registers";
5319 return false;
5320 }
5321 }
5322
5323 if (RegClass != -1) {
5324 if (Reg.isVirtual())
5325 continue;
5326
5327 const TargetRegisterClass *RC = RI.getRegClass(RegClass);
5328 if (!RC->contains(Reg)) {
5329 ErrInfo = "Operand has incorrect register class.";
5330 return false;
5331 }
5332 }
5333 }
5334
5335 // Verify SDWA
5336 if (isSDWA(MI)) {
5337 if (!ST.hasSDWA()) {
5338 ErrInfo = "SDWA is not supported on this target";
5339 return false;
5340 }
5341
5342 for (auto Op : {AMDGPU::OpName::src0_sel, AMDGPU::OpName::src1_sel,
5343 AMDGPU::OpName::dst_sel}) {
5344 const MachineOperand *MO = getNamedOperand(MI, Op);
5345 if (!MO)
5346 continue;
5347 int64_t Imm = MO->getImm();
5348 if (Imm < 0 || Imm > AMDGPU::SDWA::SdwaSel::DWORD) {
5349 ErrInfo = "Invalid SDWA selection";
5350 return false;
5351 }
5352 }
5353
5354 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
5355
5356 for (int OpIdx : {DstIdx, Src0Idx, Src1Idx, Src2Idx}) {
5357 if (OpIdx == -1)
5358 continue;
5359 const MachineOperand &MO = MI.getOperand(OpIdx);
5360
5361 if (!ST.hasSDWAScalar()) {
5362 // Only VGPRS on VI
5363 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
5364 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
5365 return false;
5366 }
5367 } else {
5368 // No immediates on GFX9
5369 if (!MO.isReg()) {
5370 ErrInfo =
5371 "Only reg allowed as operands in SDWA instructions on GFX9+";
5372 return false;
5373 }
5374 }
5375 }
5376
5377 if (!ST.hasSDWAOmod()) {
5378 // No omod allowed on VI
5379 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
5380 if (OMod != nullptr &&
5381 (!OMod->isImm() || OMod->getImm() != 0)) {
5382 ErrInfo = "OMod not allowed in SDWA instructions on VI";
5383 return false;
5384 }
5385 }
5386
5387 if (Opcode == AMDGPU::V_CVT_F32_FP8_sdwa ||
5388 Opcode == AMDGPU::V_CVT_F32_BF8_sdwa ||
5389 Opcode == AMDGPU::V_CVT_PK_F32_FP8_sdwa ||
5390 Opcode == AMDGPU::V_CVT_PK_F32_BF8_sdwa) {
5391 const MachineOperand *Src0ModsMO =
5392 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
5393 unsigned Mods = Src0ModsMO->getImm();
5394 if (Mods & SISrcMods::ABS || Mods & SISrcMods::NEG ||
5395 Mods & SISrcMods::SEXT) {
5396 ErrInfo = "sext, abs and neg are not allowed on this instruction";
5397 return false;
5398 }
5399 }
5400
5401 uint32_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
5402 if (isVOPC(BasicOpcode)) {
5403 if (!ST.hasSDWASdst() && DstIdx != -1) {
5404 // Only vcc allowed as dst on VI for VOPC
5405 const MachineOperand &Dst = MI.getOperand(DstIdx);
5406 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
5407 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
5408 return false;
5409 }
5410 } else if (!ST.hasSDWAOutModsVOPC()) {
5411 // No clamp allowed on GFX9 for VOPC
5412 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
5413 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
5414 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI";
5415 return false;
5416 }
5417
5418 // No omod allowed on GFX9 for VOPC
5419 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
5420 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
5421 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI";
5422 return false;
5423 }
5424 }
5425 }
5426
5427 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
5428 if (DstUnused && DstUnused->isImm() &&
5429 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
5430 const MachineOperand &Dst = MI.getOperand(DstIdx);
5431 if (!Dst.isReg() || !Dst.isTied()) {
5432 ErrInfo = "Dst register should have tied register";
5433 return false;
5434 }
5435
5436 const MachineOperand &TiedMO =
5437 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
5438 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
5439 ErrInfo =
5440 "Dst register should be tied to implicit use of preserved register";
5441 return false;
5442 }
5443 if (TiedMO.getReg().isPhysical() && Dst.getReg() != TiedMO.getReg()) {
5444 ErrInfo = "Dst register should use same physical register as preserved";
5445 return false;
5446 }
5447 }
5448 }
5449
5450 // Verify MIMG / VIMAGE / VSAMPLE
5451 if (isImage(Opcode) && !MI.mayStore()) {
5452 // Ensure that the return type used is large enough for all the options
5453 // being used TFE/LWE require an extra result register.
5454 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
5455 if (DMask) {
5456 uint64_t DMaskImm = DMask->getImm();
5457 uint32_t RegCount = isGather4(Opcode) ? 4 : llvm::popcount(DMaskImm);
5458 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
5459 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
5460 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
5461
5462 // Adjust for packed 16 bit values
5463 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
5464 RegCount = divideCeil(RegCount, 2);
5465
5466 // Adjust if using LWE or TFE
5467 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
5468 RegCount += 1;
5469
5470 const uint32_t DstIdx =
5471 AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
5472 const MachineOperand &Dst = MI.getOperand(DstIdx);
5473 if (Dst.isReg()) {
5474 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
5475 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
5476 if (RegCount > DstSize) {
5477 ErrInfo = "Image instruction returns too many registers for dst "
5478 "register class";
5479 return false;
5480 }
5481 }
5482 }
5483 }
5484
5485 // Verify VOP*. Ignore multiple sgpr operands on writelane.
5486 if (isVALU(MI) && Desc.getOpcode() != AMDGPU::V_WRITELANE_B32) {
5487 unsigned ConstantBusCount = 0;
5488 bool UsesLiteral = false;
5489 const MachineOperand *LiteralVal = nullptr;
5490
5491 int ImmIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm);
5492 if (ImmIdx != -1) {
5493 ++ConstantBusCount;
5494 UsesLiteral = true;
5495 LiteralVal = &MI.getOperand(ImmIdx);
5496 }
5497
5498 SmallVector<Register, 2> SGPRsUsed;
5499 Register SGPRUsed;
5500
5501 // Only look at the true operands. Only a real operand can use the constant
5502 // bus, and we don't want to check pseudo-operands like the source modifier
5503 // flags.
5504 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx, Src3Idx}) {
5505 if (OpIdx == -1)
5506 continue;
5507 const MachineOperand &MO = MI.getOperand(OpIdx);
5508 if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) {
5509 if (MO.isReg()) {
5510 SGPRUsed = MO.getReg();
5511 if (!llvm::is_contained(SGPRsUsed, SGPRUsed)) {
5512 ++ConstantBusCount;
5513 SGPRsUsed.push_back(SGPRUsed);
5514 }
5515 } else if (!MO.isFI()) { // Treat FI like a register.
5516 if (!UsesLiteral) {
5517 ++ConstantBusCount;
5518 UsesLiteral = true;
5519 LiteralVal = &MO;
5520 } else if (!MO.isIdenticalTo(*LiteralVal)) {
5521 assert(isVOP2(MI) || isVOP3(MI));
5522 ErrInfo = "VOP2/VOP3 instruction uses more than one literal";
5523 return false;
5524 }
5525 }
5526 }
5527 }
5528
5529 SGPRUsed = findImplicitSGPRRead(MI);
5530 if (SGPRUsed) {
5531 // Implicit uses may safely overlap true operands
5532 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
5533 return !RI.regsOverlap(SGPRUsed, SGPR);
5534 })) {
5535 ++ConstantBusCount;
5536 SGPRsUsed.push_back(SGPRUsed);
5537 }
5538 }
5539
5540 // v_writelane_b32 is an exception from constant bus restriction:
5541 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const
5542 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
5543 Opcode != AMDGPU::V_WRITELANE_B32) {
5544 ErrInfo = "VOP* instruction violates constant bus restriction";
5545 return false;
5546 }
5547
5548 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) {
5549 ErrInfo = "VOP3 instruction uses literal";
5550 return false;
5551 }
5552 }
5553
5554 // Special case for writelane - this can break the multiple constant bus rule,
5555 // but still can't use more than one SGPR register
5556 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
5557 unsigned SGPRCount = 0;
5558 Register SGPRUsed;
5559
5560 for (int OpIdx : {Src0Idx, Src1Idx}) {
5561 if (OpIdx == -1)
5562 break;
5563
5564 const MachineOperand &MO = MI.getOperand(OpIdx);
5565
5566 if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) {
5567 if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
5568 if (MO.getReg() != SGPRUsed)
5569 ++SGPRCount;
5570 SGPRUsed = MO.getReg();
5571 }
5572 }
5573 if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
5574 ErrInfo = "WRITELANE instruction violates constant bus restriction";
5575 return false;
5576 }
5577 }
5578 }
5579
5580 // Verify misc. restrictions on specific instructions.
5581 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 ||
5582 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) {
5583 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
5584 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
5585 const MachineOperand &Src2 = MI.getOperand(Src2Idx);
5586 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
5587 if (!compareMachineOp(Src0, Src1) &&
5588 !compareMachineOp(Src0, Src2)) {
5589 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
5590 return false;
5591 }
5592 }
5593 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() &
5594 SISrcMods::ABS) ||
5595 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() &
5596 SISrcMods::ABS) ||
5597 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() &
5598 SISrcMods::ABS)) {
5599 ErrInfo = "ABS not allowed in VOP3B instructions";
5600 return false;
5601 }
5602 }
5603
5604 if (isSOP2(MI) || isSOPC(MI)) {
5605 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
5606 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
5607
5608 if (!isRegOrFI(Src0) && !isRegOrFI(Src1) &&
5609 !isInlineConstant(Src0, Desc.operands()[Src0Idx]) &&
5610 !isInlineConstant(Src1, Desc.operands()[Src1Idx]) &&
5611 !Src0.isIdenticalTo(Src1)) {
5612 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
5613 return false;
5614 }
5615 }
5616
5617 if (isSOPK(MI)) {
5618 const auto *Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
5619 if (Desc.isBranch()) {
5620 if (!Op->isMBB()) {
5621 ErrInfo = "invalid branch target for SOPK instruction";
5622 return false;
5623 }
5624 } else {
5625 uint64_t Imm = Op->getImm();
5626 if (sopkIsZext(Opcode)) {
5627 if (!isUInt<16>(Imm)) {
5628 ErrInfo = "invalid immediate for SOPK instruction";
5629 return false;
5630 }
5631 } else {
5632 if (!isInt<16>(Imm)) {
5633 ErrInfo = "invalid immediate for SOPK instruction";
5634 return false;
5635 }
5636 }
5637 }
5638 }
5639
5640 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
5641 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
5642 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
5643 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
5644 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
5645 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
5646
5647 const unsigned StaticNumOps =
5648 Desc.getNumOperands() + Desc.implicit_uses().size();
5649 const unsigned NumImplicitOps = IsDst ? 2 : 1;
5650
5651 // Require additional implicit operands. This allows a fixup done by the
5652 // post RA scheduler where the main implicit operand is killed and
5653 // implicit-defs are added for sub-registers that remain live after this
5654 // instruction.
5655 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
5656 ErrInfo = "missing implicit register operands";
5657 return false;
5658 }
5659
5660 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
5661 if (IsDst) {
5662 if (!Dst->isUse()) {
5663 ErrInfo = "v_movreld_b32 vdst should be a use operand";
5664 return false;
5665 }
5666
5667 unsigned UseOpIdx;
5668 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
5669 UseOpIdx != StaticNumOps + 1) {
5670 ErrInfo = "movrel implicit operands should be tied";
5671 return false;
5672 }
5673 }
5674
5675 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
5676 const MachineOperand &ImpUse
5677 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
5678 if (!ImpUse.isReg() || !ImpUse.isUse() ||
5679 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
5680 ErrInfo = "src0 should be subreg of implicit vector use";
5681 return false;
5682 }
5683 }
5684
5685 // Make sure we aren't losing exec uses in the td files. This mostly requires
5686 // being careful when using let Uses to try to add other use registers.
5687 if (shouldReadExec(MI)) {
5688 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
5689 ErrInfo = "VALU instruction does not implicitly read exec mask";
5690 return false;
5691 }
5692 }
5693
5694 if (isSMRD(MI)) {
5695 if (MI.mayStore() &&
5696 ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) {
5697 // The register offset form of scalar stores may only use m0 as the
5698 // soffset register.
5699 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soffset);
5700 if (Soff && Soff->getReg() != AMDGPU::M0) {
5701 ErrInfo = "scalar stores must use m0 as offset register";
5702 return false;
5703 }
5704 }
5705 }
5706
5707 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) {
5708 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
5709 if (Offset->getImm() != 0) {
5710 ErrInfo = "subtarget does not support offsets in flat instructions";
5711 return false;
5712 }
5713 }
5714
5715 if (isDS(MI) && !ST.hasGDS()) {
5716 const MachineOperand *GDSOp = getNamedOperand(MI, AMDGPU::OpName::gds);
5717 if (GDSOp && GDSOp->getImm() != 0) {
5718 ErrInfo = "GDS is not supported on this subtarget";
5719 return false;
5720 }
5721 }
5722
5723 if (isImage(MI)) {
5724 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
5725 if (DimOp) {
5726 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
5727 AMDGPU::OpName::vaddr0);
5728 AMDGPU::OpName RSrcOpName =
5729 isMIMG(MI) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
5730 int RsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, RSrcOpName);
5731 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
5732 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
5733 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
5734 const AMDGPU::MIMGDimInfo *Dim =
5736
5737 if (!Dim) {
5738 ErrInfo = "dim is out of range";
5739 return false;
5740 }
5741
5742 bool IsA16 = false;
5743 if (ST.hasR128A16()) {
5744 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128);
5745 IsA16 = R128A16->getImm() != 0;
5746 } else if (ST.hasA16()) {
5747 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16);
5748 IsA16 = A16->getImm() != 0;
5749 }
5750
5751 bool IsNSA = RsrcIdx - VAddr0Idx > 1;
5752
5753 unsigned AddrWords =
5754 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, ST.hasG16());
5755
5756 unsigned VAddrWords;
5757 if (IsNSA) {
5758 VAddrWords = RsrcIdx - VAddr0Idx;
5759 if (ST.hasPartialNSAEncoding() &&
5760 AddrWords > ST.getNSAMaxSize(isVSAMPLE(MI))) {
5761 unsigned LastVAddrIdx = RsrcIdx - 1;
5762 VAddrWords += getOpSize(MI, LastVAddrIdx) / 4 - 1;
5763 }
5764 } else {
5765 VAddrWords = getOpSize(MI, VAddr0Idx) / 4;
5766 if (AddrWords > 12)
5767 AddrWords = 16;
5768 }
5769
5770 if (VAddrWords != AddrWords) {
5771 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords
5772 << " but got " << VAddrWords << "\n");
5773 ErrInfo = "bad vaddr size";
5774 return false;
5775 }
5776 }
5777 }
5778
5779 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
5780 if (DppCt) {
5781 using namespace AMDGPU::DPP;
5782
5783 unsigned DC = DppCt->getImm();
5784 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
5785 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
5786 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
5787 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
5788 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
5789 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
5790 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
5791 ErrInfo = "Invalid dpp_ctrl value";
5792 return false;
5793 }
5794 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
5795 !ST.hasDPPWavefrontShifts()) {
5796 ErrInfo = "Invalid dpp_ctrl value: "
5797 "wavefront shifts are not supported on GFX10+";
5798 return false;
5799 }
5800 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
5801 !ST.hasDPPBroadcasts()) {
5802 ErrInfo = "Invalid dpp_ctrl value: "
5803 "broadcasts are not supported on GFX10+";
5804 return false;
5805 }
5806 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
5807 ST.getGeneration() < AMDGPUSubtarget::GFX10) {
5808 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST &&
5809 DC <= DppCtrl::ROW_NEWBCAST_LAST &&
5810 !ST.hasGFX90AInsts()) {
5811 ErrInfo = "Invalid dpp_ctrl value: "
5812 "row_newbroadcast/row_share is not supported before "
5813 "GFX90A/GFX10";
5814 return false;
5815 }
5816 if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
5817 ErrInfo = "Invalid dpp_ctrl value: "
5818 "row_share and row_xmask are not supported before GFX10";
5819 return false;
5820 }
5821 }
5822
5823 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
5825 AMDGPU::isDPALU_DPP(Desc, *this, ST)) {
5826 ErrInfo = "Invalid dpp_ctrl value: "
5827 "DP ALU dpp only support row_newbcast";
5828 return false;
5829 }
5830 }
5831
5832 if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) {
5833 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
5834 AMDGPU::OpName DataName =
5835 isDS(Opcode) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata;
5836 const MachineOperand *Data = getNamedOperand(MI, DataName);
5837 const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1);
5838 if (Data && !Data->isReg())
5839 Data = nullptr;
5840
5841 if (ST.hasGFX90AInsts()) {
5842 if (Dst && Data && !Dst->isTied() && !Data->isTied() &&
5843 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) {
5844 ErrInfo = "Invalid register class: "
5845 "vdata and vdst should be both VGPR or AGPR";
5846 return false;
5847 }
5848 if (Data && Data2 &&
5849 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) {
5850 ErrInfo = "Invalid register class: "
5851 "both data operands should be VGPR or AGPR";
5852 return false;
5853 }
5854 } else {
5855 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) ||
5856 (Data && RI.isAGPR(MRI, Data->getReg())) ||
5857 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) {
5858 ErrInfo = "Invalid register class: "
5859 "agpr loads and stores not supported on this GPU";
5860 return false;
5861 }
5862 }
5863 }
5864
5865 if (ST.needsAlignedVGPRs()) {
5866 const auto isAlignedReg = [&MI, &MRI, this](AMDGPU::OpName OpName) -> bool {
5868 if (!Op)
5869 return true;
5870 Register Reg = Op->getReg();
5871 if (Reg.isPhysical())
5872 return !(RI.getHWRegIndex(Reg) & 1);
5873 const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
5874 return RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) &&
5875 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1);
5876 };
5877
5878 if (Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_SEMA_BR ||
5879 Opcode == AMDGPU::DS_GWS_BARRIER) {
5880
5881 if (!isAlignedReg(AMDGPU::OpName::data0)) {
5882 ErrInfo = "Subtarget requires even aligned vector registers "
5883 "for DS_GWS instructions";
5884 return false;
5885 }
5886 }
5887
5888 if (isMIMG(MI)) {
5889 if (!isAlignedReg(AMDGPU::OpName::vaddr)) {
5890 ErrInfo = "Subtarget requires even aligned vector registers "
5891 "for vaddr operand of image instructions";
5892 return false;
5893 }
5894 }
5895 }
5896
5897 if (Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts()) {
5898 const MachineOperand *Src = getNamedOperand(MI, AMDGPU::OpName::src0);
5899 if (Src->isReg() && RI.isSGPRReg(MRI, Src->getReg())) {
5900 ErrInfo = "Invalid register class: "
5901 "v_accvgpr_write with an SGPR is not supported on this GPU";
5902 return false;
5903 }
5904 }
5905
5906 if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
5907 const MachineOperand &SrcOp = MI.getOperand(1);
5908 if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) {
5909 ErrInfo = "pseudo expects only physical SGPRs";
5910 return false;
5911 }
5912 }
5913
5914 if (const MachineOperand *CPol = getNamedOperand(MI, AMDGPU::OpName::cpol)) {
5915 if (CPol->getImm() & AMDGPU::CPol::SCAL) {
5916 if (!ST.hasScaleOffset()) {
5917 ErrInfo = "Subtarget does not support offset scaling";
5918 return false;
5919 }
5920 if (!AMDGPU::supportsScaleOffset(*this, MI.getOpcode())) {
5921 ErrInfo = "Instruction does not support offset scaling";
5922 return false;
5923 }
5924 }
5925 }
5926
5927 // See SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more
5928 // information.
5929 if (AMDGPU::isPackedFP32Inst(Opcode) && AMDGPU::isGFX12Plus(ST)) {
5930 for (unsigned I = 0; I < 3; ++I) {
5932 return false;
5933 }
5934 }
5935
5936 if (ST.hasFlatScratchHiInB64InstHazard() && isSALU(MI) &&
5937 MI.readsRegister(AMDGPU::SRC_FLAT_SCRATCH_BASE_HI, nullptr)) {
5938 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::sdst);
5939 if ((Dst && RI.getRegClassForReg(MRI, Dst->getReg()) ==
5940 &AMDGPU::SReg_64RegClass) ||
5941 Opcode == AMDGPU::S_BITCMP0_B64 || Opcode == AMDGPU::S_BITCMP1_B64) {
5942 ErrInfo = "Instruction cannot read flat_scratch_base_hi";
5943 return false;
5944 }
5945 }
5946
5947 return true;
5948}
5949
5950// It is more readable to list mapped opcodes on the same line.
5951// clang-format off
5952
// NOTE(review): the defining signature line (file line 5953) is missing from
// this extraction; by content this appears to be SIInstrInfo::getVALUOp —
// confirm against the full file. It maps a scalar (SALU) opcode to the
// equivalent vector (VALU) opcode, or AMDGPU::INSTRUCTION_LIST_END when no
// vector equivalent exists (the caller must handle that sentinel).
5954 switch (MI.getOpcode()) {
5955 default: return AMDGPU::INSTRUCTION_LIST_END;
// Generic pseudos map to themselves: they are already register-class agnostic.
5956 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
5957 case AMDGPU::COPY: return AMDGPU::COPY;
5958 case AMDGPU::PHI: return AMDGPU::PHI;
5959 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
5960 case AMDGPU::WQM: return AMDGPU::WQM;
5961 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
5962 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM;
5963 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM;
5964 case AMDGPU::S_MOV_B32: {
// A register source (or an AGPR destination) cannot use V_MOV_B32, so fall
// back to a plain COPY in those cases.
5965 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
5966 return MI.getOperand(1).isReg() ||
5967 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
5968 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
5969 }
5970 case AMDGPU::S_ADD_I32:
5971 return ST.hasAddNoCarryInsts() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
5972 case AMDGPU::S_ADDC_U32:
5973 return AMDGPU::V_ADDC_U32_e32;
5974 case AMDGPU::S_SUB_I32:
5975 return ST.hasAddNoCarryInsts() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32;
5976 // FIXME: These are not consistently handled, and selected when the carry is
5977 // used.
5978 case AMDGPU::S_ADD_U32:
5979 return AMDGPU::V_ADD_CO_U32_e32;
5980 case AMDGPU::S_SUB_U32:
5981 return AMDGPU::V_SUB_CO_U32_e32;
5982 case AMDGPU::S_ADD_U64_PSEUDO:
5983 return AMDGPU::V_ADD_U64_PSEUDO;
5984 case AMDGPU::S_SUB_U64_PSEUDO:
5985 return AMDGPU::V_SUB_U64_PSEUDO;
5986 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
5987 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64;
5988 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64;
5989 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64;
5990 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
5991 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
5992 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
5993 case AMDGPU::S_XNOR_B32:
// V_XNOR only exists with the deep-learning instruction set; otherwise there
// is no single-instruction vector equivalent.
5994 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
5995 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
5996 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
5997 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
5998 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
5999 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
6000 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64;
6001 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
6002 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64;
6003 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
6004 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64;
// Sign-extensions lower to a bitfield extract with the appropriate width.
6005 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64;
6006 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64;
6007 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64;
6008 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64;
6009 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
6010 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
6011 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
// NOTE(review): S_NOT_B64 maps to the 32-bit V_NOT — presumably the caller
// splits the 64-bit operation into two 32-bit halves; confirm.
6012 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
// Scalar compares (which set SCC) become vector compares (writing VCC).
6013 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64;
6014 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64;
6015 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64;
6016 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64;
6017 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64;
6018 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64;
6019 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64;
6020 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64;
6021 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64;
6022 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64;
6023 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64;
6024 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64;
6025 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64;
6026 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64;
6027 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
6028 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
6029 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
6030 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
// Branches on SCC become branches on VCC (zero / non-zero).
6031 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
6032 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
6033 case AMDGPU::S_CVT_F32_I32: return AMDGPU::V_CVT_F32_I32_e64;
6034 case AMDGPU::S_CVT_F32_U32: return AMDGPU::V_CVT_F32_U32_e64;
6035 case AMDGPU::S_CVT_I32_F32: return AMDGPU::V_CVT_I32_F32_e64;
6036 case AMDGPU::S_CVT_U32_F32: return AMDGPU::V_CVT_U32_F32_e64;
// 16-bit ops pick the true16 or fake16 VALU encoding based on the subtarget.
6037 case AMDGPU::S_CVT_F32_F16:
6038 case AMDGPU::S_CVT_HI_F32_F16:
6039 return ST.useRealTrue16Insts() ? AMDGPU::V_CVT_F32_F16_t16_e64
6040 : AMDGPU::V_CVT_F32_F16_fake16_e64;
6041 case AMDGPU::S_CVT_F16_F32:
6042 return ST.useRealTrue16Insts() ? AMDGPU::V_CVT_F16_F32_t16_e64
6043 : AMDGPU::V_CVT_F16_F32_fake16_e64;
6044 case AMDGPU::S_CEIL_F32: return AMDGPU::V_CEIL_F32_e64;
6045 case AMDGPU::S_FLOOR_F32: return AMDGPU::V_FLOOR_F32_e64;
6046 case AMDGPU::S_TRUNC_F32: return AMDGPU::V_TRUNC_F32_e64;
6047 case AMDGPU::S_RNDNE_F32: return AMDGPU::V_RNDNE_F32_e64;
6048 case AMDGPU::S_CEIL_F16:
6049 return ST.useRealTrue16Insts() ? AMDGPU::V_CEIL_F16_t16_e64
6050 : AMDGPU::V_CEIL_F16_fake16_e64;
6051 case AMDGPU::S_FLOOR_F16:
6052 return ST.useRealTrue16Insts() ? AMDGPU::V_FLOOR_F16_t16_e64
6053 : AMDGPU::V_FLOOR_F16_fake16_e64;
6054 case AMDGPU::S_TRUNC_F16:
6055 return ST.useRealTrue16Insts() ? AMDGPU::V_TRUNC_F16_t16_e64
6056 : AMDGPU::V_TRUNC_F16_fake16_e64;
6057 case AMDGPU::S_RNDNE_F16:
6058 return ST.useRealTrue16Insts() ? AMDGPU::V_RNDNE_F16_t16_e64
6059 : AMDGPU::V_RNDNE_F16_fake16_e64;
6060 case AMDGPU::S_ADD_F32: return AMDGPU::V_ADD_F32_e64;
6061 case AMDGPU::S_SUB_F32: return AMDGPU::V_SUB_F32_e64;
6062 case AMDGPU::S_MIN_F32: return AMDGPU::V_MIN_F32_e64;
6063 case AMDGPU::S_MAX_F32: return AMDGPU::V_MAX_F32_e64;
6064 case AMDGPU::S_MINIMUM_F32: return AMDGPU::V_MINIMUM_F32_e64;
6065 case AMDGPU::S_MAXIMUM_F32: return AMDGPU::V_MAXIMUM_F32_e64;
6066 case AMDGPU::S_MUL_F32: return AMDGPU::V_MUL_F32_e64;
6067 case AMDGPU::S_ADD_F16:
6068 return ST.useRealTrue16Insts() ? AMDGPU::V_ADD_F16_t16_e64
6069 : AMDGPU::V_ADD_F16_fake16_e64;
6070 case AMDGPU::S_SUB_F16:
6071 return ST.useRealTrue16Insts() ? AMDGPU::V_SUB_F16_t16_e64
6072 : AMDGPU::V_SUB_F16_fake16_e64;
6073 case AMDGPU::S_MIN_F16:
6074 return ST.useRealTrue16Insts() ? AMDGPU::V_MIN_F16_t16_e64
6075 : AMDGPU::V_MIN_F16_fake16_e64;
6076 case AMDGPU::S_MAX_F16:
6077 return ST.useRealTrue16Insts() ? AMDGPU::V_MAX_F16_t16_e64
6078 : AMDGPU::V_MAX_F16_fake16_e64;
6079 case AMDGPU::S_MINIMUM_F16:
6080 return ST.useRealTrue16Insts() ? AMDGPU::V_MINIMUM_F16_t16_e64
6081 : AMDGPU::V_MINIMUM_F16_fake16_e64;
6082 case AMDGPU::S_MAXIMUM_F16:
6083 return ST.useRealTrue16Insts() ? AMDGPU::V_MAXIMUM_F16_t16_e64
6084 : AMDGPU::V_MAXIMUM_F16_fake16_e64;
6085 case AMDGPU::S_MUL_F16:
6086 return ST.useRealTrue16Insts() ? AMDGPU::V_MUL_F16_t16_e64
6087 : AMDGPU::V_MUL_F16_fake16_e64;
6088 case AMDGPU::S_CVT_PK_RTZ_F16_F32: return AMDGPU::V_CVT_PKRTZ_F16_F32_e64;
6089 case AMDGPU::S_FMAC_F32: return AMDGPU::V_FMAC_F32_e64;
6090 case AMDGPU::S_FMAC_F16:
6091 return ST.useRealTrue16Insts() ? AMDGPU::V_FMAC_F16_t16_e64
6092 : AMDGPU::V_FMAC_F16_fake16_e64;
6093 case AMDGPU::S_FMAMK_F32: return AMDGPU::V_FMAMK_F32;
6094 case AMDGPU::S_FMAAK_F32: return AMDGPU::V_FMAAK_F32;
6095 case AMDGPU::S_CMP_LT_F32: return AMDGPU::V_CMP_LT_F32_e64;
6096 case AMDGPU::S_CMP_EQ_F32: return AMDGPU::V_CMP_EQ_F32_e64;
6097 case AMDGPU::S_CMP_LE_F32: return AMDGPU::V_CMP_LE_F32_e64;
6098 case AMDGPU::S_CMP_GT_F32: return AMDGPU::V_CMP_GT_F32_e64;
6099 case AMDGPU::S_CMP_LG_F32: return AMDGPU::V_CMP_LG_F32_e64;
6100 case AMDGPU::S_CMP_GE_F32: return AMDGPU::V_CMP_GE_F32_e64;
6101 case AMDGPU::S_CMP_O_F32: return AMDGPU::V_CMP_O_F32_e64;
6102 case AMDGPU::S_CMP_U_F32: return AMDGPU::V_CMP_U_F32_e64;
6103 case AMDGPU::S_CMP_NGE_F32: return AMDGPU::V_CMP_NGE_F32_e64;
6104 case AMDGPU::S_CMP_NLG_F32: return AMDGPU::V_CMP_NLG_F32_e64;
6105 case AMDGPU::S_CMP_NGT_F32: return AMDGPU::V_CMP_NGT_F32_e64;
6106 case AMDGPU::S_CMP_NLE_F32: return AMDGPU::V_CMP_NLE_F32_e64;
6107 case AMDGPU::S_CMP_NEQ_F32: return AMDGPU::V_CMP_NEQ_F32_e64;
6108 case AMDGPU::S_CMP_NLT_F32: return AMDGPU::V_CMP_NLT_F32_e64;
6109 case AMDGPU::S_CMP_LT_F16:
6110 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LT_F16_t16_e64
6111 : AMDGPU::V_CMP_LT_F16_fake16_e64;
6112 case AMDGPU::S_CMP_EQ_F16:
6113 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_EQ_F16_t16_e64
6114 : AMDGPU::V_CMP_EQ_F16_fake16_e64;
6115 case AMDGPU::S_CMP_LE_F16:
6116 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LE_F16_t16_e64
6117 : AMDGPU::V_CMP_LE_F16_fake16_e64;
6118 case AMDGPU::S_CMP_GT_F16:
6119 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_GT_F16_t16_e64
6120 : AMDGPU::V_CMP_GT_F16_fake16_e64;
6121 case AMDGPU::S_CMP_LG_F16:
6122 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_LG_F16_t16_e64
6123 : AMDGPU::V_CMP_LG_F16_fake16_e64;
6124 case AMDGPU::S_CMP_GE_F16:
6125 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_GE_F16_t16_e64
6126 : AMDGPU::V_CMP_GE_F16_fake16_e64;
6127 case AMDGPU::S_CMP_O_F16:
6128 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_O_F16_t16_e64
6129 : AMDGPU::V_CMP_O_F16_fake16_e64;
6130 case AMDGPU::S_CMP_U_F16:
6131 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_U_F16_t16_e64
6132 : AMDGPU::V_CMP_U_F16_fake16_e64;
6133 case AMDGPU::S_CMP_NGE_F16:
6134 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NGE_F16_t16_e64
6135 : AMDGPU::V_CMP_NGE_F16_fake16_e64;
6136 case AMDGPU::S_CMP_NLG_F16:
6137 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLG_F16_t16_e64
6138 : AMDGPU::V_CMP_NLG_F16_fake16_e64;
6139 case AMDGPU::S_CMP_NGT_F16:
6140 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NGT_F16_t16_e64
6141 : AMDGPU::V_CMP_NGT_F16_fake16_e64;
6142 case AMDGPU::S_CMP_NLE_F16:
6143 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLE_F16_t16_e64
6144 : AMDGPU::V_CMP_NLE_F16_fake16_e64;
6145 case AMDGPU::S_CMP_NEQ_F16:
6146 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NEQ_F16_t16_e64
6147 : AMDGPU::V_CMP_NEQ_F16_fake16_e64;
6148 case AMDGPU::S_CMP_NLT_F16:
6149 return ST.useRealTrue16Insts() ? AMDGPU::V_CMP_NLT_F16_t16_e64
6150 : AMDGPU::V_CMP_NLT_F16_fake16_e64;
// Pseudo-scalar transcendentals map back to their plain VALU forms.
6151 case AMDGPU::V_S_EXP_F32_e64: return AMDGPU::V_EXP_F32_e64;
6152 case AMDGPU::V_S_EXP_F16_e64:
6153 return ST.useRealTrue16Insts() ? AMDGPU::V_EXP_F16_t16_e64
6154 : AMDGPU::V_EXP_F16_fake16_e64;
6155 case AMDGPU::V_S_LOG_F32_e64: return AMDGPU::V_LOG_F32_e64;
6156 case AMDGPU::V_S_LOG_F16_e64:
6157 return ST.useRealTrue16Insts() ? AMDGPU::V_LOG_F16_t16_e64
6158 : AMDGPU::V_LOG_F16_fake16_e64;
6159 case AMDGPU::V_S_RCP_F32_e64: return AMDGPU::V_RCP_F32_e64;
6160 case AMDGPU::V_S_RCP_F16_e64:
6161 return ST.useRealTrue16Insts() ? AMDGPU::V_RCP_F16_t16_e64
6162 : AMDGPU::V_RCP_F16_fake16_e64;
6163 case AMDGPU::V_S_RSQ_F32_e64: return AMDGPU::V_RSQ_F32_e64;
6164 case AMDGPU::V_S_RSQ_F16_e64:
6165 return ST.useRealTrue16Insts() ? AMDGPU::V_RSQ_F16_t16_e64
6166 : AMDGPU::V_RSQ_F16_fake16_e64;
6167 case AMDGPU::V_S_SQRT_F32_e64: return AMDGPU::V_SQRT_F32_e64;
6168 case AMDGPU::V_S_SQRT_F16_e64:
6169 return ST.useRealTrue16Insts() ? AMDGPU::V_SQRT_F16_t16_e64
6170 : AMDGPU::V_SQRT_F16_fake16_e64;
6171 }
// NOTE(review): the call opener line (6172, presumably `llvm_unreachable(`)
// was lost in extraction; the string below is its message — confirm.
6173 "Unexpected scalar opcode without corresponding vector one!");
6174}
6175
6176// clang-format on
6177
// NOTE(review): the opening signature lines (6178-6180) are missing from this
// extraction; by content this saves the current EXEC mask into Reg and then
// enables all lanes (presumably SIInstrInfo::insertScratchExecCopy) — confirm.
// The IsSCCLive flag selects an SCC-preserving two-instruction sequence over
// the single S_OR_SAVEEXEC, which clobbers SCC.
6181 const DebugLoc &DL, Register Reg,
6182 bool IsSCCLive,
6183 SlotIndexes *Indexes) const {
6184 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
6185 const SIInstrInfo *TII = ST.getInstrInfo();
// NOTE(review): the declaration of LMC (line 6186, the lane-mask constants
// lookup) was lost in extraction — confirm against the full file.
6187 if (IsSCCLive) {
6188 // Insert two move instructions, one to save the original value of EXEC and
6189 // the other to turn on all bits in EXEC. This is required as we can't use
6190 // the single instruction S_OR_SAVEEXEC that clobbers SCC.
6191 auto StoreExecMI = BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), Reg)
// NOTE(review): the source-operand line of the save (6192) was lost in
// extraction — confirm.
6193 auto FlipExecMI =
6194 BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addImm(-1);
// Keep SlotIndexes (live interval maps) in sync when the caller provides them.
6195 if (Indexes) {
6196 Indexes->insertMachineInstrInMaps(*StoreExecMI);
6197 Indexes->insertMachineInstrInMaps(*FlipExecMI);
6198 }
6199 } else {
6200 auto SaveExec =
6201 BuildMI(MBB, MBBI, DL, TII->get(LMC.OrSaveExecOpc), Reg).addImm(-1);
6202 SaveExec->getOperand(3).setIsDead(); // Mark SCC as dead.
6203 if (Indexes)
6204 Indexes->insertMachineInstrInMaps(*SaveExec);
6205 }
6206}
6207
// NOTE(review): the opening signature lines (6208-6209) and the LMC
// declaration (6212) are missing from this extraction; by content this
// restores EXEC from Reg, killing Reg (presumably SIInstrInfo::restoreExec) —
// confirm against the full file.
6210 const DebugLoc &DL, Register Reg,
6211 SlotIndexes *Indexes) const {
6212 auto ExecRestoreMI = BuildMI(MBB, MBBI, DL, get(LMC.MovOpc), LMC.ExecReg)
6213 .addReg(Reg, RegState::Kill);
// Keep the SlotIndexes analysis in sync when provided.
6215 if (Indexes)
6216 Indexes->insertMachineInstrInMaps(*ExecRestoreMI);
6217}
6218
// NOTE(review): the signature and the opening of the assert (lines 6219-6221)
// are missing from this extraction; by content this scans the entry block for
// the whole-wave-function setup pseudo and returns it — confirm.
6222 "Not a whole wave func");
// Only the entry block is searched: the setup pseudo is expected there.
6223 MachineBasicBlock &MBB = *MF.begin();
6224 for (MachineInstr &MI : MBB)
6225 if (MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_SETUP ||
6226 MI.getOpcode() == AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_SETUP)
6227 return &MI;
6228
6229 llvm_unreachable("Couldn't find SI_SETUP_WHOLE_WAVE_FUNC instruction");
6230}
6231
// NOTE(review): the first signature line (6232) is missing from this
// extraction; by content this returns the register class for operand OpNo
// (presumably SIInstrInfo::getOpRegClass) — confirm.
6233 unsigned OpNo) const {
6234 const MCInstrDesc &Desc = get(MI.getOpcode());
// When the descriptor carries no class for this operand (variadic instruction,
// operand beyond the described list, or RegClass == -1), fall back to the
// class of the actual register: MRI for virtuals, base class for physicals.
6235 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
6236 Desc.operands()[OpNo].RegClass == -1) {
6237 Register Reg = MI.getOperand(OpNo).getReg();
6238
6239 if (Reg.isVirtual()) {
6240 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
6241 return MRI.getRegClass(Reg);
6242 }
6243 return RI.getPhysRegBaseClass(Reg);
6244 }
6245
// Otherwise resolve the descriptor's class ID; a negative ID means none.
6246 int16_t RegClass = getOpRegClassID(Desc.operands()[OpNo]);
6247 return RegClass < 0 ? nullptr : RI.getRegClass(RegClass);
6248}
6249
// NOTE(review): the signature lines (6250-6251) are missing from this
// extraction; by content this rewrites operand OpIdx into a freshly created
// VGPR via a move/copy so the instruction becomes legal (presumably
// SIInstrInfo::legalizeOpWithMove) — confirm.
6252 MachineBasicBlock *MBB = MI.getParent();
6253 MachineOperand &MO = MI.getOperand(OpIdx);
6254 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
6255 unsigned RCID = getOpRegClassID(get(MI.getOpcode()).operands()[OpIdx]);
6256 const TargetRegisterClass *RC = RI.getRegClass(RCID);
6257 unsigned Size = RI.getRegSizeInBits(*RC);
// Pick a move opcode by operand width; register operands use COPY instead,
// and SGPR-class operands use scalar moves.
6258 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO
6259 : Size == 16 ? AMDGPU::V_MOV_B16_t16_e64
6260 : AMDGPU::V_MOV_B32_e32;
6261 if (MO.isReg())
6262 Opcode = AMDGPU::COPY;
6263 else if (RI.isSGPRClass(RC))
6264 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
6265
// The new register is the VGPR-equivalent of the expected class.
6266 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
6267 Register Reg = MRI.createVirtualRegister(VRC);
6268 DebugLoc DL = MBB->findDebugLoc(I);
6269 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
// Redirect the original operand at the new register (use, not def).
6270 MO.ChangeToRegister(Reg, false);
6271}
6272
// NOTE(review): the opening signature lines (6273-6274) are missing from this
// extraction; by content this materializes subregister SubIdx of SuperReg
// (presumably SIInstrInfo::buildExtractSubReg) — confirm.
6275 const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC,
6276 unsigned SubIdx, const TargetRegisterClass *SubRC) const {
// Physical registers have real subregisters; no copy is needed.
6277 if (!SuperReg.getReg().isVirtual())
6278 return RI.getSubReg(SuperReg.getReg(), SubIdx);
6279
// For virtual registers, insert a COPY of the (composed) subregister into a
// fresh register of the subregister class.
6280 MachineBasicBlock *MBB = MI->getParent();
6281 const DebugLoc &DL = MI->getDebugLoc();
6282 Register SubReg = MRI.createVirtualRegister(SubRC);
6283
// Compose with any subregister index already on the source operand.
6284 unsigned NewSubIdx = RI.composeSubRegIndices(SuperReg.getSubReg(), SubIdx);
6285 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
6286 .addReg(SuperReg.getReg(), {}, NewSubIdx);
6287 return SubReg;
6288}
6289
// NOTE(review): the opening signature lines (6290-6291) are missing from this
// extraction; by content this is the immediate-aware variant of
// buildExtractSubReg (presumably SIInstrInfo::buildExtractSubRegOrImm) —
// confirm.
6292 const MachineOperand &Op, const TargetRegisterClass *SuperRC,
6293 unsigned SubIdx, const TargetRegisterClass *SubRC) const {
// For a 64-bit immediate, sub0 is the low 32 bits and sub1 the high 32 bits.
6294 if (Op.isImm()) {
6295 if (SubIdx == AMDGPU::sub0)
6296 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
6297 if (SubIdx == AMDGPU::sub1)
6298 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
6299
6300 llvm_unreachable("Unhandled register index for immediate");
6301 }
6302
// Register case: delegate to the subregister-extraction helper and wrap the
// result as a use operand.
6303 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
6304 SubIdx, SubRC);
6305 return MachineOperand::CreateReg(SubReg, false);
6306}
6307
6308// Change the order of operands from (0, 1, 2) to (0, 2, 1)
6309void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
6310 assert(Inst.getNumExplicitOperands() == 3);
6311 MachineOperand Op1 = Inst.getOperand(1);
6312 Inst.removeOperand(1);
6313 Inst.addOperand(Op1);
6314}
6315
// NOTE(review): the first signature line (6316) is missing from this
// extraction; by content this checks whether MO's register is compatible with
// the register class the operand descriptor demands (presumably
// SIInstrInfo::isLegalRegOperand, MRI/OpInfo/MO overload) — confirm.
6317 const MCOperandInfo &OpInfo,
6318 const MachineOperand &MO) const {
// Only register operands can be legal register operands.
6319 if (!MO.isReg())
6320 return false;
6321
6322 Register Reg = MO.getReg();
6323
6324 const TargetRegisterClass *DRC = RI.getRegClass(getOpRegClassID(OpInfo));
// Physical registers: simple membership test in the demanded class.
6325 if (Reg.isPhysical())
6326 return DRC->contains(Reg);
6327
6328 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
6329
// With a subregister index, legality is judged against the largest legal
// super-class that still provides the demanded class at that index.
6330 if (MO.getSubReg()) {
6331 const MachineFunction *MF = MO.getParent()->getMF();
6332 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
6333 if (!SuperRC)
6334 return false;
6335 return RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()) != nullptr;
6336 }
6337
// No subregister: the classes must share a common subclass.
6338 return RI.getCommonSubClass(DRC, RC) != nullptr;
6339}
6340
// NOTE(review): the opening signature lines (6340-6341) are missing from this
// extraction; by content this is the MachineInstr/OpIdx overload of
// isLegalRegOperand, layering AGPR and subtarget-hazard rules on top of the
// pure register-class check — confirm.
6342 const MachineOperand &MO) const {
6343 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
6344 const MCOperandInfo OpInfo = MI.getDesc().operands()[OpIdx];
6345 unsigned Opc = MI.getOpcode();
6346
6347 // See SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more
6348 // information.
6349 if (AMDGPU::isPackedFP32Inst(MI.getOpcode()) && AMDGPU::isGFX12Plus(ST) &&
6350 MO.isReg() && RI.isSGPRReg(MRI, MO.getReg())) {
6351 constexpr AMDGPU::OpName OpNames[] = {
6352 AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2};
6353
6354 for (auto [I, OpName] : enumerate(OpNames)) {
6355 int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[I]);
// NOTE(review): the second half of this condition (line 6357, presumably the
// call checking packed-math FP32 operand legality) was lost in extraction —
// confirm against the full file.
6356 if (static_cast<unsigned>(SrcIdx) == OpIdx &&
6358 return false;
6359 }
6360 }
6361
6362 if (!isLegalRegOperand(MRI, OpInfo, MO))
6363 return false;
6364
6365 // check Accumulate GPR operand
6366 bool IsAGPR = RI.isAGPR(MRI, MO.getReg());
6367 if (IsAGPR && !ST.hasMAIInsts())
6368 return false;
6369 if (IsAGPR && (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
6370 (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc)))
6371 return false;
6372 // Atomics should have both vdst and vdata either vgpr or agpr.
6373 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
6374 const int DataIdx = AMDGPU::getNamedOperandIdx(
6375 Opc, isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata);
6376 if ((int)OpIdx == VDstIdx && DataIdx != -1 &&
6377 MI.getOperand(DataIdx).isReg() &&
6378 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR)
6379 return false;
6380 if ((int)OpIdx == DataIdx) {
6381 if (VDstIdx != -1 &&
6382 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR)
6383 return false;
6384 // DS instructions with 2 src operands also must have tied RC.
6385 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
6386 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() &&
6387 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR)
6388 return false;
6389 }
6390
6391 // Check V_ACCVGPR_WRITE_B32_e64
// Pre-GFX90A, v_accvgpr_write cannot take an SGPR source.
6392 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts() &&
6393 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) &&
6394 RI.isSGPRReg(MRI, MO.getReg()))
6395 return false;
6396
// Subtarget hazard: 64-bit SALU results (and 64-bit bit-compares) must not
// read SRC_FLAT_SCRATCH_BASE_HI.
6397 if (ST.hasFlatScratchHiInB64InstHazard() &&
6398 MO.getReg() == AMDGPU::SRC_FLAT_SCRATCH_BASE_HI && isSALU(MI)) {
6399 if (const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::sdst)) {
6400 if (AMDGPU::getRegBitWidth(*RI.getRegClassForReg(MRI, Dst->getReg())) ==
6401 64)
6402 return false;
6403 }
6404 if (Opc == AMDGPU::S_BITCMP0_B64 || Opc == AMDGPU::S_BITCMP1_B64)
6405 return false;
6406 }
6407
6408 return true;
6409}
6410
// NOTE(review): the first signature line (6411) is missing from this
// extraction; by content this accepts any immediate-like operand and defers
// register operands to isLegalRegOperand (presumably
// SIInstrInfo::isLegalVSrcOperand) — confirm.
6412 const MCOperandInfo &OpInfo,
6413 const MachineOperand &MO) const {
6414 if (MO.isReg())
6415 return isLegalRegOperand(MRI, OpInfo, MO);
6416
6417 // Handle non-register types that are treated like immediates.
6418 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
6419 return true;
6420}
6421
// NOTE(review): the first signature line (6422) is missing from this
// extraction; by content this checks source SrcN of a packed-math FP32
// instruction (presumably SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand):
// an SGPR source is only legal when neither op_sel nor op_sel_hi is set on its
// modifiers — confirm.
6423 const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN,
6424 const MachineOperand *MO) const {
// Parallel tables: sources first, then their matching modifier operands.
6425 constexpr unsigned NumOps = 3;
6426 constexpr AMDGPU::OpName OpNames[NumOps * 2] = {
6427 AMDGPU::OpName::src0, AMDGPU::OpName::src1,
6428 AMDGPU::OpName::src2, AMDGPU::OpName::src0_modifiers,
6429 AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src2_modifiers};
6430
6431 assert(SrcN < NumOps);
6432
// When the caller did not pass the operand, resolve it by name; a missing
// source operand is trivially legal.
6433 if (!MO) {
6434 int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[SrcN]);
6435 if (SrcIdx == -1)
6436 return true;
6437 MO = &MI.getOperand(SrcIdx);
6438 }
6439
// Only SGPR sources are restricted.
6440 if (!MO->isReg() || !RI.isSGPRReg(MRI, MO->getReg()))
6441 return true;
6442
6443 int ModsIdx =
6444 AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[NumOps + SrcN]);
6445 if (ModsIdx == -1)
6446 return true;
6447
6448 unsigned Mods = MI.getOperand(ModsIdx).getImm();
6449 bool OpSel = Mods & SISrcMods::OP_SEL_0;
6450 bool OpSelHi = Mods & SISrcMods::OP_SEL_1;
6451
6452 return !OpSel && !OpSelHi;
6453}
6454
// NOTE(review): the opening signature lines (6454-6455) are missing from this
// extraction; by content this decides whether MO (or the existing operand at
// OpIdx when MO is null) is legal at position OpIdx of MI, accounting for the
// constant-bus and literal limits (presumably SIInstrInfo::isOperandLegal) —
// confirm.
6456 const MachineOperand *MO) const {
6457 const MachineFunction &MF = *MI.getMF();
6458 const MachineRegisterInfo &MRI = MF.getRegInfo();
6459 const MCInstrDesc &InstDesc = MI.getDesc();
6460 const MCOperandInfo &OpInfo = InstDesc.operands()[OpIdx];
6461 int64_t RegClass = getOpRegClassID(OpInfo);
6462 const TargetRegisterClass *DefinedRC =
6463 RegClass != -1 ? RI.getRegClass(RegClass) : nullptr;
6464 if (!MO)
6465 MO = &MI.getOperand(OpIdx);
6466
6467 const bool IsInlineConst = !MO->isReg() && isInlineConstant(*MO, OpInfo);
6468
// VALU path: enforce the per-opcode constant-bus limit and the literal limit
// across all operands, counting the candidate MO itself.
6469 if (isVALU(MI) && !IsInlineConst && usesConstantBus(MRI, *MO, OpInfo)) {
6470 const MachineOperand *UsedLiteral = nullptr;
6471
6472 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
6473 int LiteralLimit = !isVOP3(MI) || ST.hasVOP3Literal() ? 1 : 0;
6474
6475 // TODO: Be more permissive with frame indexes.
6476 if (!MO->isReg() && !isInlineConstant(*MO, OpInfo)) {
6477 if (!LiteralLimit--)
6478 return false;
6479
6480 UsedLiteral = MO;
6481 }
6482
// NOTE(review): the declaration of SGPRsUsed (line 6483, presumably a small
// set of register/subreg pairs) was lost in extraction — confirm.
6484 if (MO->isReg())
6485 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg()));
6486
// Scan the other operands and charge each distinct SGPR / literal against the
// remaining budget.
6487 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6488 if (i == OpIdx)
6489 continue;
6490 const MachineOperand &Op = MI.getOperand(i);
6491 if (Op.isReg()) {
6492 if (Op.isUse()) {
6493 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
6494 if (regUsesConstantBus(Op, MRI) && SGPRsUsed.insert(SGPR).second) {
6495 if (--ConstantBusLimit <= 0)
6496 return false;
6497 }
6498 }
6499 } else if (AMDGPU::isSISrcOperand(InstDesc.operands()[i]) &&
6500 !isInlineConstant(Op, InstDesc.operands()[i])) {
6501 // The same literal may be used multiple times.
6502 if (!UsedLiteral)
6503 UsedLiteral = &Op;
6504 else if (UsedLiteral->isIdenticalTo(Op))
6505 continue;
6506
6507 if (!LiteralLimit--)
6508 return false;
6509 if (--ConstantBusLimit <= 0)
6510 return false;
6511 }
6512 }
6513 } else if (!IsInlineConst && !MO->isReg() && isSALU(MI)) {
6514 // There can be at most one literal operand, but it can be repeated.
6515 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
6516 if (i == OpIdx)
6517 continue;
6518 const MachineOperand &Op = MI.getOperand(i);
6519 if (!Op.isReg() && !Op.isFI() && !Op.isRegMask() &&
6520 !isInlineConstant(Op, InstDesc.operands()[i]) &&
6521 !Op.isIdenticalTo(*MO))
6522 return false;
6523
6524 // Do not fold a non-inlineable and non-register operand into an
6525 // instruction that already has a frame index. The frame index handling
6526 // code could not handle well when a frame index co-exists with another
6527 // non-register operand, unless that operand is an inlineable immediate.
6528 if (Op.isFI())
6529 return false;
6530 }
6531 } else if (IsInlineConst && ST.hasNoF16PseudoScalarTransInlineConstants() &&
6532 isF16PseudoScalarTrans(MI.getOpcode())) {
6533 return false;
6534 }
6535
6536 if (MO->isReg()) {
6537 if (!DefinedRC)
6538 return OpInfo.OperandType == MCOI::OPERAND_UNKNOWN;
6539 return isLegalRegOperand(MI, OpIdx, *MO);
6540 }
6541
// Immediate path: validate 64-bit literal encodability for 64-bit operands.
6542 if (MO->isImm()) {
6543 uint64_t Imm = MO->getImm();
6544 bool Is64BitFPOp = OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_FP64;
6545 bool Is64BitOp = Is64BitFPOp ||
6546 OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_INT64 ||
6547 OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2INT32 ||
6548 OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP32;
6549 if (Is64BitOp &&
6550 !AMDGPU::isInlinableLiteral64(Imm, ST.hasInv2PiInlineImm())) {
6551 if (!AMDGPU::isValid32BitLiteral(Imm, Is64BitFPOp) &&
6552 (!ST.has64BitLiterals() || InstDesc.getSize() != 4))
6553 return false;
6554
6555 // FIXME: We can use sign extended 64-bit literals, but only for signed
6556 // operands. At the moment we do not know if an operand is signed.
6557 // Such operand will be encoded as its low 32 bits and then either
6558 // correctly sign extended or incorrectly zero extended by HW.
6559 // If 64-bit literals are supported and the literal will be encoded
6560 // as full 64 bit we still can use it.
6561 if (!Is64BitFPOp && (int32_t)Imm < 0 &&
6562 (!ST.has64BitLiterals() || AMDGPU::isValid32BitLiteral(Imm, false)))
6563 return false;
6564 }
6565 }
6566
6567 // Handle non-register types that are treated like immediates.
6568 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal());
6569
6570 if (!DefinedRC) {
6571 // This operand expects an immediate.
6572 return true;
6573 }
6574
6575 return isImmOperandLegal(MI, OpIdx, *MO);
6576}
6577
// NOTE(review): the defining signature line (6578) is missing from this
// extraction; by content this is a GFX940/GFX950-specific predicate over VALU
// instructions (transcendentals, dot products, MFMA, and a fixed list of
// packed/QSAD opcodes) — confirm its name against the full file.
6579 bool IsGFX950Only = ST.hasGFX950Insts();
6580 bool IsGFX940Only = ST.hasGFX940Insts();
6581
// The property only applies on GFX940/GFX950 subtargets.
6582 if (!IsGFX950Only && !IsGFX940Only)
6583 return false;
6584
6585 if (!isVALU(MI))
6586 return false;
6587
6588 // V_COS, V_EXP, V_RCP, etc.
6589 if (isTRANS(MI))
6590 return true;
6591
6592 // DOT2, DOT2C, DOT4, etc.
6593 if (isDOT(MI))
6594 return true;
6595
6596 // MFMA, SMFMA
6597 if (isMFMA(MI))
6598 return true;
6599
// Remaining cases: an explicit list of packed-math, conversion, and QSAD
// opcodes.
6600 unsigned Opcode = MI.getOpcode();
6601 switch (Opcode) {
6602 case AMDGPU::V_CVT_PK_BF8_F32_e64:
6603 case AMDGPU::V_CVT_PK_FP8_F32_e64:
6604 case AMDGPU::V_MQSAD_PK_U16_U8_e64:
6605 case AMDGPU::V_MQSAD_U32_U8_e64:
6606 case AMDGPU::V_PK_ADD_F16:
6607 case AMDGPU::V_PK_ADD_F32:
6608 case AMDGPU::V_PK_ADD_I16:
6609 case AMDGPU::V_PK_ADD_U16:
6610 case AMDGPU::V_PK_ASHRREV_I16:
6611 case AMDGPU::V_PK_FMA_F16:
6612 case AMDGPU::V_PK_FMA_F32:
6613 case AMDGPU::V_PK_FMAC_F16_e32:
6614 case AMDGPU::V_PK_FMAC_F16_e64:
6615 case AMDGPU::V_PK_LSHLREV_B16:
6616 case AMDGPU::V_PK_LSHRREV_B16:
6617 case AMDGPU::V_PK_MAD_I16:
6618 case AMDGPU::V_PK_MAD_U16:
6619 case AMDGPU::V_PK_MAX_F16:
6620 case AMDGPU::V_PK_MAX_I16:
6621 case AMDGPU::V_PK_MAX_U16:
6622 case AMDGPU::V_PK_MIN_F16:
6623 case AMDGPU::V_PK_MIN_I16:
6624 case AMDGPU::V_PK_MIN_U16:
6625 case AMDGPU::V_PK_MOV_B32:
6626 case AMDGPU::V_PK_MUL_F16:
6627 case AMDGPU::V_PK_MUL_F32:
6628 case AMDGPU::V_PK_MUL_LO_U16:
6629 case AMDGPU::V_PK_SUB_I16:
6630 case AMDGPU::V_PK_SUB_U16:
6631 case AMDGPU::V_QSAD_PK_U16_U8_e64:
6632 return true;
6633 default:
6634 return false;
6635 }
6636}
6637
// NOTE(review): the first signature line (6638) is missing from this
// extraction; by content this legalizes the operands of a VOP2 instruction
// (presumably SIInstrInfo::legalizeOperandsVOP2): it moves illegal operands
// into VGPRs/SGPRs or commutes the instruction when that is cheaper — confirm.
6639 MachineInstr &MI) const {
6640 unsigned Opc = MI.getOpcode();
6641 const MCInstrDesc &InstrDesc = get(Opc);
6642
6643 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
6644 MachineOperand &Src0 = MI.getOperand(Src0Idx);
6645
6646 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
6647 MachineOperand &Src1 = MI.getOperand(Src1Idx);
6648
6649 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
6650 // we need to only have one constant bus use before GFX10.
6651 bool HasImplicitSGPR = findImplicitSGPRRead(MI);
6652 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && Src0.isReg() &&
6653 RI.isSGPRReg(MRI, Src0.getReg()))
6654 legalizeOpWithMove(MI, Src0Idx);
6655
6656 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
6657 // both the value to write (src0) and lane select (src1). Fix up non-SGPR
6658 // src0/src1 with V_READFIRSTLANE.
6659 if (Opc == AMDGPU::V_WRITELANE_B32) {
6660 const DebugLoc &DL = MI.getDebugLoc();
6661 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
6662 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6663 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6664 .add(Src0);
6665 Src0.ChangeToRegister(Reg, false);
6666 }
6667 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
6668 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6669 const DebugLoc &DL = MI.getDebugLoc();
6670 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6671 .add(Src1);
6672 Src1.ChangeToRegister(Reg, false);
6673 }
6674 return;
6675 }
6676
6677 // Special case: V_FMAC_F32 and V_FMAC_F16 have src2.
6678 if (Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F16_e32) {
6679 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
// src2 of the e32 FMAC forms is tied and must be a VGPR.
6680 if (!RI.isVGPR(MRI, MI.getOperand(Src2Idx).getReg()))
6681 legalizeOpWithMove(MI, Src2Idx);
6682 }
6683
6684 // VOP2 src0 instructions support all operand types, so we don't need to check
6685 // their legality. If src1 is already legal, we don't need to do anything.
6686 if (isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src1))
6687 return;
6688
6689 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
6690 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
6691 // select is uniform.
6692 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
6693 RI.isVGPR(MRI, Src1.getReg())) {
6694 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6695 const DebugLoc &DL = MI.getDebugLoc();
6696 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6697 .add(Src1);
6698 Src1.ChangeToRegister(Reg, false);
6699 return;
6700 }
6701
6702 // We do not use commuteInstruction here because it is too aggressive and will
6703 // commute if it is possible. We only want to commute here if it improves
6704 // legality. This can be called a fairly large number of times so don't waste
6705 // compile time pointlessly swapping and checking legality again.
6706 if (HasImplicitSGPR || !MI.isCommutable()) {
6707 legalizeOpWithMove(MI, Src1Idx);
6708 return;
6709 }
6710
6711 // If src0 can be used as src1, commuting will make the operands legal.
6712 // Otherwise we have to give up and insert a move.
6713 //
6714 // TODO: Other immediate-like operand kinds could be commuted if there was a
6715 // MachineOperand::ChangeTo* for them.
6716 if ((!Src1.isImm() && !Src1.isReg()) ||
6717 !isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src0)) {
6718 legalizeOpWithMove(MI, Src1Idx);
6719 return;
6720 }
6721
6722 int CommutedOpc = commuteOpcode(MI);
6723 if (CommutedOpc == -1) {
6724 legalizeOpWithMove(MI, Src1Idx);
6725 return;
6726 }
6727
// Manually swap src0 and src1 (including subregister indices and kill flags)
// after switching to the commuted opcode.
6728 MI.setDesc(get(CommutedOpc));
6729
6730 Register Src0Reg = Src0.getReg();
6731 unsigned Src0SubReg = Src0.getSubReg();
6732 bool Src0Kill = Src0.isKill();
6733
6734 if (Src1.isImm())
6735 Src0.ChangeToImmediate(Src1.getImm());
6736 else if (Src1.isReg()) {
6737 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
6738 Src0.setSubReg(Src1.getSubReg());
6739 } else
6740 llvm_unreachable("Should only have register or immediate operands");
6741
6742 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
6743 Src1.setSubReg(Src0SubReg);
// NOTE(review): the final statement line (6744) was lost in extraction before
// the closing brace — confirm against the full file.
6745}
6746
6747// Legalize VOP3 operands. All operand types are supported for any operand
6748// but only one literal constant and only starting from GFX10.
// Walks src0/src1/src2 and, whenever an operand would exceed the subtarget's
// constant-bus or literal budget, replaces it via legalizeOpWithMove (or
// V_READFIRSTLANE for the permlane family, whose src1/src2 must be scalar).
6750 MachineInstr &MI) const {
6751 unsigned Opc = MI.getOpcode();
6752
 // Operand indices of src0/src1/src2; -1 when the opcode lacks that operand.
6753 int VOP3Idx[3] = {
6754 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
6755 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
6756 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
6757 };
6758
6759 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 ||
6760 Opc == AMDGPU::V_PERMLANEX16_B32_e64 ||
6761 Opc == AMDGPU::V_PERMLANE_BCAST_B32_e64 ||
6762 Opc == AMDGPU::V_PERMLANE_UP_B32_e64 ||
6763 Opc == AMDGPU::V_PERMLANE_DOWN_B32_e64 ||
6764 Opc == AMDGPU::V_PERMLANE_XOR_B32_e64 ||
6765 Opc == AMDGPU::V_PERMLANE_IDX_GEN_B32_e64) {
6766 // src1 and src2 must be scalar
6767 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]);
6768 const DebugLoc &DL = MI.getDebugLoc();
6769 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
6770 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6771 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6772 .add(Src1);
6773 Src1.ChangeToRegister(Reg, false);
6774 }
 // src2 is optional for some of these opcodes, so check for its presence.
6775 if (VOP3Idx[2] != -1) {
6776 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]);
6777 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
6778 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
6779 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
6780 .add(Src2);
6781 Src2.ChangeToRegister(Reg, false);
6782 }
6783 }
6784 }
6785
6786 // Find the one SGPR operand we are allowed to use.
6787 int ConstantBusLimit = ST.getConstantBusLimit(Opc);
6788 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
6789 SmallDenseSet<unsigned> SGPRsUsed;
6790 Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
6791 if (SGPRReg) {
 // An SGPR the instruction already uses consumes one constant-bus slot.
6792 SGPRsUsed.insert(SGPRReg);
6793 --ConstantBusLimit;
6794 }
6795
6796 for (int Idx : VOP3Idx) {
6797 if (Idx == -1)
6798 break;
6799 MachineOperand &MO = MI.getOperand(Idx);
6800
6801 if (!MO.isReg()) {
 // Inline constants do not use the constant bus or a literal slot.
6802 if (isInlineConstant(MO, get(Opc).operands()[Idx]))
6803 continue;
6804
 // A non-inline immediate consumes both a literal slot and a
 // constant-bus slot; keep it only if both budgets allow.
6805 if (LiteralLimit > 0 && ConstantBusLimit > 0) {
6806 --LiteralLimit;
6807 --ConstantBusLimit;
6808 continue;
6809 }
6810
 // Out of budget: materialize the immediate into a register.
6811 --LiteralLimit;
6812 --ConstantBusLimit;
6813 legalizeOpWithMove(MI, Idx);
6814 continue;
6815 }
6816
6817 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg())))
6818 continue; // VGPRs are legal
6819
6820 // We can use one SGPR in each VOP3 instruction prior to GFX10
6821 // and two starting from GFX10.
 // Reusing an SGPR already counted does not consume another slot.
6822 if (SGPRsUsed.count(MO.getReg()))
6823 continue;
6824 if (ConstantBusLimit > 0) {
6825 SGPRsUsed.insert(MO.getReg());
6826 --ConstantBusLimit;
6827 continue;
6828 }
6829
6830 // If we make it this far, then the operand is not legal and we must
6831 // legalize it.
6832 legalizeOpWithMove(MI, Idx);
6833 }
6834
6835 // Special case: V_FMAC_F32 and V_FMAC_F16 have src2 tied to vdst.
6836 if ((Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
6837 !RI.isVGPR(MRI, MI.getOperand(VOP3Idx[2]).getReg()))
6838 legalizeOpWithMove(MI, VOP3Idx[2]);
6839
6840 // Fix the register class of packed FP32 instructions on gfx12+. See
6841 // SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more information.
6843 for (unsigned I = 0; I < 3; ++I) {
6844 if (!isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, /*SrcN=*/I))
6845 legalizeOpWithMove(MI, VOP3Idx[I]);
6846 }
6847 }
6848}
6849
// Copy the value in vector register SrcReg into a fresh SGPR of the
// equivalent scalar class (narrowed to DstRC if one is supplied) using
// V_READFIRSTLANE_B32 per 32-bit piece, and return the new SGPR.
// NOTE(review): V_READFIRSTLANE returns the first active lane's value, so
// this is only meaningful when callers know the value is uniform — the
// callers in this file document that assumption.
6852 const TargetRegisterClass *DstRC /*=nullptr*/) const {
6853 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
6854 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
6855 if (DstRC)
6856 SRC = RI.getCommonSubClass(SRC, DstRC);
6857
6858 Register DstReg = MRI.createVirtualRegister(SRC);
 // Number of 32-bit pieces to read back individually.
6859 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
6860
 // V_READFIRSTLANE takes a VGPR source; copy AGPR inputs to VGPRs first.
6861 if (RI.hasAGPRs(VRC)) {
6862 VRC = RI.getEquivalentVGPRClass(VRC);
6863 Register NewSrcReg = MRI.createVirtualRegister(VRC);
6864 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
6865 get(TargetOpcode::COPY), NewSrcReg)
6866 .addReg(SrcReg);
6867 SrcReg = NewSrcReg;
6868 }
6869
 // Single-dword case: one readfirstlane is enough.
6870 if (SubRegs == 1) {
6871 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
6872 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
6873 .addReg(SrcReg);
6874 return DstReg;
6875 }
6876
 // Multi-dword case: read each 32-bit channel, then reassemble the pieces
 // into the wide SGPR with a REG_SEQUENCE.
6878 for (unsigned i = 0; i < SubRegs; ++i) {
6879 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
6880 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
6881 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
6882 .addReg(SrcReg, {}, RI.getSubRegFromChannel(i));
6883 SRegs.push_back(SGPR);
6884 }
6885
6887 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
6888 get(AMDGPU::REG_SEQUENCE), DstReg);
6889 for (unsigned i = 0; i < SubRegs; ++i) {
6890 MIB.addReg(SRegs[i]);
6891 MIB.addImm(RI.getSubRegFromChannel(i));
6892 }
6893 return DstReg;
6894}
6895
// Legalize SMRD operands: sbase and soffset must be SGPRs; rewrite VGPR
// operands through readlaneVGPRToSGPR.
6897 MachineInstr &MI) const {
6898
6899 // If the pointer is stored in VGPRs, then we need to move them to
6900 // SGPRs using v_readfirstlane. This is safe because we only select
6901 // loads with uniform pointers to SMRD instruction so we know the
6902 // pointer value is uniform.
6903 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
6904 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
6905 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
6906 SBase->setReg(SGPR);
6907 }
 // The scalar offset operand, if present, must likewise be an SGPR.
6908 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soffset);
6909 if (SOff && !RI.isSGPRReg(MRI, SOff->getReg())) {
6910 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
6911 SOff->setReg(SGPR);
6912 }
6913}
6914
// Try to rewrite a FLAT/GLOBAL instruction whose saddr operand holds a VGPR
// into the corresponding vaddr-addressed opcode, moving the pointer from the
// saddr slot to the vaddr slot. The instruction is modified in place (callers
// rely on their iterators staying valid). Returns true on success.
6916 unsigned Opc = Inst.getOpcode();
6917 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
6918 if (OldSAddrIdx < 0)
6919 return false;
6920
6921 assert(isSegmentSpecificFLAT(Inst) || (isFLAT(Inst) && ST.hasFlatGVSMode()));
6922
6923 int NewOpc = AMDGPU::getGlobalVaddrOp(Opc);
6924 if (NewOpc < 0)
6926 if (NewOpc < 0)
6927 return false;
6928
6929 MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
6930 MachineOperand &SAddr = Inst.getOperand(OldSAddrIdx);
 // Nothing to do if saddr is already scalar.
6931 if (RI.isSGPRReg(MRI, SAddr.getReg()))
6932 return false;
6933
6934 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
6935 if (NewVAddrIdx < 0)
6936 return false;
6937
6938 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
6939
6940 // Check vaddr, it shall be zero or absent.
 // If present, vaddr must be defined by a single "mov 0"; that zero is
 // folded away by the rewrite.
6941 MachineInstr *VAddrDef = nullptr;
6942 if (OldVAddrIdx >= 0) {
6943 MachineOperand &VAddr = Inst.getOperand(OldVAddrIdx);
6944 VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg());
6945 if (!VAddrDef || !VAddrDef->isMoveImmediate() ||
6946 !VAddrDef->getOperand(1).isImm() ||
6947 VAddrDef->getOperand(1).getImm() != 0)
6948 return false;
6949 }
6950
6951 const MCInstrDesc &NewDesc = get(NewOpc);
6952 Inst.setDesc(NewDesc);
6953
6954 // Callers expect iterator to be valid after this call, so modify the
6955 // instruction in place.
6956 if (OldVAddrIdx == NewVAddrIdx) {
6957 MachineOperand &NewVAddr = Inst.getOperand(NewVAddrIdx);
6958 // Clear use list from the old vaddr holding a zero register.
6959 MRI.removeRegOperandFromUseList(&NewVAddr);
6960 MRI.moveOperands(&NewVAddr, &SAddr, 1);
6961 Inst.removeOperand(OldSAddrIdx);
6962 // Update the use list with the pointer we have just moved from vaddr to
6963 // saddr position. Otherwise new vaddr will be missing from the use list.
6964 MRI.removeRegOperandFromUseList(&NewVAddr);
6965 MRI.addRegOperandToUseList(&NewVAddr);
6966 } else {
6967 assert(OldSAddrIdx == NewVAddrIdx);
6968
6969 if (OldVAddrIdx >= 0) {
6970 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
6971 AMDGPU::OpName::vdst_in);
6972
6973 // removeOperand doesn't try to fixup tied operand indexes as it goes, so
6974 // it asserts. Untie the operands for now and retie them afterwards.
6975 if (NewVDstIn != -1) {
6976 int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in);
6977 Inst.untieRegOperand(OldVDstIn);
6978 }
6979
6980 Inst.removeOperand(OldVAddrIdx);
6981
6982 if (NewVDstIn != -1) {
6983 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
6984 Inst.tieOperands(NewVDst, NewVDstIn);
6985 }
6986 }
6987 }
6988
 // Erase the now-dead "mov 0" that fed the old vaddr, if nothing else
 // (ignoring debug uses) reads it.
6989 if (VAddrDef && MRI.use_nodbg_empty(VAddrDef->getOperand(0).getReg()))
6990 VAddrDef->eraseFromParent();
6991
6992 return true;
6993}
6994
6995// FIXME: Remove this when SelectionDAG is obsoleted.
// Legalize a FLAT instruction's saddr operand: if it is a VGPR, read it into
// an SGPR of the operand's declared register class via readlaneVGPRToSGPR.
6997 MachineInstr &MI) const {
6998 if (!isSegmentSpecificFLAT(MI) && !ST.hasFlatGVSMode())
6999 return;
7000
7001 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence
7002 // thinks they are uniform, so a readfirstlane should be valid.
7003 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr);
7004 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg())))
7005 return;
7006
7008 return;
7009
 // Narrow the readfirstlane result to the register class the instruction
 // declares for saddr.
7010 const TargetRegisterClass *DeclaredRC =
7011 getRegClass(MI.getDesc(), SAddr->getOperandNo());
7012
7013 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI, DeclaredRC);
7014 SAddr->setReg(ToSGPR);
7015}
7016
// Make operand Op conform to register class DstRC by inserting a COPY into a
// fresh DstRC vreg before iterator I in InsertMBB and redirecting Op to it.
// Attempts to fold an immediate-defining def into the copy, and adds an
// implicit EXEC use to vector-destination copies when appropriate.
7019 const TargetRegisterClass *DstRC,
7022 const DebugLoc &DL) const {
7023 Register OpReg = Op.getReg();
7024 unsigned OpSubReg = Op.getSubReg();
7025
 // Account for the subregister index when comparing classes.
7026 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
7027 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
7028
7029 // Check if operand is already the correct register class.
7030 if (DstRC == OpRC)
7031 return;
7032
7033 Register DstReg = MRI.createVirtualRegister(DstRC);
7034 auto Copy =
7035 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).addReg(OpReg);
7036 Op.setReg(DstReg);
7037
7038 MachineInstr *Def = MRI.getVRegDef(OpReg);
7039 if (!Def)
7040 return;
7041
7042 // Try to eliminate the copy if it is copying an immediate value.
7043 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
7044 foldImmediate(*Copy, *Def, OpReg, &MRI);
7045
 // Walk the copy chain to determine whether the source value ultimately
 // comes from an IMPLICIT_DEF (stopping at physical-register copies).
7046 bool ImpDef = Def->isImplicitDef();
7047 while (!ImpDef && Def && Def->isCopy()) {
7048 if (Def->getOperand(1).getReg().isPhysical())
7049 break;
7050 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
7051 ImpDef = Def && Def->isImplicitDef();
7052 }
 // NOTE(review): vector-destination copies get an implicit EXEC use here,
 // presumably to model their dependence on the exec mask — confirm against
 // the EXEC-dependency handling elsewhere in this backend.
7053 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
7054 !ImpDef)
7055 Copy.addReg(AMDGPU::EXEC, RegState::Implicit);
7056}
7057
7058// Emit the actual waterfall loop, executing the wrapped instruction for each
7059// unique value of \p ScalarOps across all lanes. In the best case we execute 1
7060// iteration, in the worst case we execute 64 (once per lane).
// For each scalar operand: read the first active lane's value into SGPRs,
// compare it against the VGPR value in every lane, AND the per-operand
// comparisons together, then AND-save-exec to restrict EXEC to the matching
// lanes for this iteration. Terminators in BodyBB clear the handled lanes and
// branch back to LoopBB until all lanes are done.
7061static void
7064 MachineBasicBlock &LoopBB,
7065 MachineBasicBlock &BodyBB,
7066 const DebugLoc &DL,
7067 ArrayRef<MachineOperand *> ScalarOps) {
7068 MachineFunction &MF = *LoopBB.getParent();
7069 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
7070 const SIRegisterInfo *TRI = ST.getRegisterInfo();
7072 const auto *BoolXExecRC = TRI->getWaveMaskRegClass();
7073
 // Accumulated "this lane matches all readfirstlane values" condition.
7075 Register CondReg;
7076
7077 for (MachineOperand *ScalarOp : ScalarOps) {
7078 unsigned RegSize = TRI->getRegSizeInBits(ScalarOp->getReg(), MRI);
7079 unsigned NumSubRegs = RegSize / 32;
7080 Register VScalarOp = ScalarOp->getReg();
7081
 // 32-bit operand: one readfirstlane + one 32-bit compare.
7082 if (NumSubRegs == 1) {
7083 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
7084
7085 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurReg)
7086 .addReg(VScalarOp);
7087
7088 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
7089
7090 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U32_e64), NewCondReg)
7091 .addReg(CurReg)
7092 .addReg(VScalarOp)
7093
7094 // Combine the comparison results with AND.
7095 if (!CondReg) // First.
7096 CondReg = NewCondReg;
7097 else { // If not the first, we create an AND.
7098 Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
7099 BuildMI(LoopBB, I, DL, TII.get(LMC.AndOpc), AndReg)
7100 .addReg(CondReg)
7101 .addReg(NewCondReg);
7102 CondReg = AndReg;
7103 }
7104
7105 // Update ScalarOp operand to use the SGPR ScalarOp.
7106 ScalarOp->setReg(CurReg);
7107 ScalarOp->setIsKill();
7108 } else {
 // Wider operand: read 64 bits at a time and compare per 64-bit chunk.
7109 SmallVector<Register, 8> ReadlanePieces;
7110 RegState VScalarOpUndef = getUndefRegState(ScalarOp->isUndef());
7111 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 &&
7112 "Unhandled register size");
7113
7114 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) {
7115 Register CurRegLo =
7116 MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
7117 Register CurRegHi =
7118 MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
7119
7120 // Read the next variant <- also loop target.
7121 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo)
7122 .addReg(VScalarOp, VScalarOpUndef, TRI->getSubRegFromChannel(Idx));
7123
7124 // Read the next variant <- also loop target.
7125 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi)
7126 .addReg(VScalarOp, VScalarOpUndef,
7127 TRI->getSubRegFromChannel(Idx + 1));
7128
7129 ReadlanePieces.push_back(CurRegLo);
7130 ReadlanePieces.push_back(CurRegHi);
7131
7132 // Comparison is to be done as 64-bit.
7133 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
7134 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg)
7135 .addReg(CurRegLo)
7136 .addImm(AMDGPU::sub0)
7137 .addReg(CurRegHi)
7138 .addImm(AMDGPU::sub1);
7139
7140 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
7141 auto Cmp = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64),
7142 NewCondReg)
7143 .addReg(CurReg);
 // For a 64-bit operand the whole register is compared; wider operands
 // compare the matching 64-bit sub-register.
7144 if (NumSubRegs <= 2)
7145 Cmp.addReg(VScalarOp);
7146 else
7147 Cmp.addReg(VScalarOp, VScalarOpUndef,
7148 TRI->getSubRegFromChannel(Idx, 2));
7149
7150 // Combine the comparison results with AND.
7151 if (!CondReg) // First.
7152 CondReg = NewCondReg;
7153 else { // If not the first, we create an AND.
7154 Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
7155 BuildMI(LoopBB, I, DL, TII.get(LMC.AndOpc), AndReg)
7156 .addReg(CondReg)
7157 .addReg(NewCondReg);
7158 CondReg = AndReg;
7159 }
7160 } // End for loop.
7161
7162 const auto *SScalarOpRC =
7163 TRI->getEquivalentSGPRClass(MRI.getRegClass(VScalarOp));
7164 Register SScalarOp = MRI.createVirtualRegister(SScalarOpRC);
7165
7166 // Build scalar ScalarOp.
7167 auto Merge =
7168 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SScalarOp);
7169 unsigned Channel = 0;
7170 for (Register Piece : ReadlanePieces) {
7171 Merge.addReg(Piece).addImm(TRI->getSubRegFromChannel(Channel++));
7172 }
7173
7174 // Update ScalarOp operand to use the SGPR ScalarOp.
7175 ScalarOp->setReg(SScalarOp);
7176 ScalarOp->setIsKill();
7177 }
7178 }
7179
7180 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
7181 MRI.setSimpleHint(SaveExec, CondReg);
7182
7183 // Update EXEC to matching lanes, saving original to SaveExec.
7184 BuildMI(LoopBB, I, DL, TII.get(LMC.AndSaveExecOpc), SaveExec)
7185 .addReg(CondReg, RegState::Kill);
7186
7187 // The original instruction is here; we insert the terminators after it.
7188 I = BodyBB.end();
7189
7190 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
7191 BuildMI(BodyBB, I, DL, TII.get(LMC.XorTermOpc), LMC.ExecReg)
7192 .addReg(LMC.ExecReg)
7193 .addReg(SaveExec);
7194
 // Pseudo terminator that loops back to LoopBB while any lanes remain.
7195 BuildMI(BodyBB, I, DL, TII.get(AMDGPU::SI_WATERFALL_LOOP)).addMBB(&LoopBB);
7196}
7197
7198// Build a waterfall loop around \p MI, replacing the VGPR \p ScalarOp register
7199// with SGPRs by iterating over all unique values across all lanes.
7200// Returns the loop basic block that now contains \p MI.
// Splits the current block into MBB -> LoopBB -> BodyBB -> RemainderBB,
// saves/restores EXEC (and SCC if live), moves [Begin, End) into BodyBB, and
// emits the loop via emitLoadScalarOpsFromVGPRLoop. Updates MDT if provided.
7201static MachineBasicBlock *
7205 MachineBasicBlock::iterator Begin = nullptr,
7206 MachineBasicBlock::iterator End = nullptr) {
7207 MachineBasicBlock &MBB = *MI.getParent();
7208 MachineFunction &MF = *MBB.getParent();
7209 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
7210 const SIRegisterInfo *TRI = ST.getRegisterInfo();
7211 MachineRegisterInfo &MRI = MF.getRegInfo();
 // Default the waterfalled range to just MI itself.
7212 if (!Begin.isValid())
7213 Begin = &MI;
7214 if (!End.isValid()) {
7215 End = &MI;
7216 ++End;
7217 }
7218 const DebugLoc &DL = MI.getDebugLoc();
7220 const auto *BoolXExecRC = TRI->getWaveMaskRegClass();
7221
7222 // Save SCC. Waterfall Loop may overwrite SCC.
7223 Register SaveSCCReg;
7224
7225 // FIXME: We should maintain SCC liveness while doing the FixSGPRCopies walk
7226 // rather than unlimited scan everywhere
7227 bool SCCNotDead =
7228 MBB.computeRegisterLiveness(TRI, AMDGPU::SCC, MI,
7229 std::numeric_limits<unsigned>::max()) !=
 // Materialize SCC into a register: S_CSELECT_B32 1, 0 yields 1 iff SCC
 // was set.
7231 if (SCCNotDead) {
7232 SaveSCCReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
7233 BuildMI(MBB, Begin, DL, TII.get(AMDGPU::S_CSELECT_B32), SaveSCCReg)
7234 .addImm(1)
7235 .addImm(0);
7236 }
7237
7238 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
7239
7240 // Save the EXEC mask
7241 BuildMI(MBB, Begin, DL, TII.get(LMC.MovOpc), SaveExec).addReg(LMC.ExecReg);
7242
7243 // Killed uses in the instruction we are waterfalling around will be
7244 // incorrect due to the added control-flow.
7246 ++AfterMI;
7247 for (auto I = Begin; I != AfterMI; I++) {
7248 for (auto &MO : I->all_uses())
7249 MRI.clearKillFlags(MO.getReg());
7250 }
7251
7252 // To insert the loop we need to split the block. Move everything after this
7253 // point to a new block, and insert a new empty block between the two.
7256 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
7258 ++MBBI;
7259
7260 MF.insert(MBBI, LoopBB);
7261 MF.insert(MBBI, BodyBB);
7262 MF.insert(MBBI, RemainderBB);
7263
 // BodyBB loops back to LoopBB until all lanes have been handled.
7264 LoopBB->addSuccessor(BodyBB);
7265 BodyBB->addSuccessor(LoopBB);
7266 BodyBB->addSuccessor(RemainderBB);
7267
7268 // Move Begin to MI to the BodyBB, and the remainder of the block to
7269 // RemainderBB.
7270 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
7271 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end());
7272 BodyBB->splice(BodyBB->begin(), &MBB, Begin, MBB.end());
7273
7274 MBB.addSuccessor(LoopBB);
7275
7276 // Update dominators. We know that MBB immediately dominates LoopBB, that
7277 // LoopBB immediately dominates BodyBB, and BodyBB immediately dominates
7278 // RemainderBB. RemainderBB immediately dominates all of the successors
7279 // transferred to it from MBB that MBB used to properly dominate.
7280 if (MDT) {
7281 MDT->addNewBlock(LoopBB, &MBB);
7282 MDT->addNewBlock(BodyBB, LoopBB);
7283 MDT->addNewBlock(RemainderBB, BodyBB);
7284 for (auto &Succ : RemainderBB->successors()) {
7285 if (MDT->properlyDominates(&MBB, Succ)) {
7286 MDT->changeImmediateDominator(Succ, RemainderBB);
7287 }
7288 }
7289 }
7290
7291 emitLoadScalarOpsFromVGPRLoop(TII, MRI, *LoopBB, *BodyBB, DL, ScalarOps);
7292
7293 MachineBasicBlock::iterator First = RemainderBB->begin();
7294 // Restore SCC
 // S_CMP_LG_U32 SaveSCCReg, 0 sets SCC back to the saved value (1 -> set,
 // 0 -> clear).
7295 if (SCCNotDead) {
7296 BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_CMP_LG_U32))
7297 .addReg(SaveSCCReg, RegState::Kill)
7298 .addImm(0);
7299 }
7300
7301 // Restore the EXEC mask
7302 BuildMI(*RemainderBB, First, DL, TII.get(LMC.MovOpc), LMC.ExecReg)
7303 .addReg(SaveExec);
7304 return BodyBB;
7305}
7306
7307// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
// Returns {RsrcPtr, NewSRsrc}: the 64-bit pointer pulled out of the first two
// dwords of the 128-bit descriptor, and a fresh SGPR_128 descriptor whose
// base is zero and whose format dwords come from getDefaultRsrcDataFormat().
7308static std::tuple<unsigned, unsigned>
7310 MachineBasicBlock &MBB = *MI.getParent();
7311 MachineFunction &MF = *MBB.getParent();
7312 MachineRegisterInfo &MRI = MF.getRegInfo();
7313
7314 // Extract the ptr from the resource descriptor.
7315 unsigned RsrcPtr =
7316 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
7317 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
7318
7319 // Create an empty resource descriptor
7320 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
7321 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
7322 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
7323 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
7324 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
7325
7326 // Zero64 = 0
7327 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
7328 .addImm(0);
7329
7330 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
7331 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
7332 .addImm(Lo_32(RsrcDataFormat));
7333
7334 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
7335 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
7336 .addImm(Hi_32(RsrcDataFormat));
7337
7338 // NewSRsrc = {Zero64, SRsrcFormat}
7339 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
7340 .addReg(Zero64)
7341 .addImm(AMDGPU::sub0_sub1)
7342 .addReg(SRsrcFormatLo)
7343 .addImm(AMDGPU::sub2)
7344 .addReg(SRsrcFormatHi)
7345 .addImm(AMDGPU::sub3);
7346
7347 return std::tuple(RsrcPtr, NewSRsrc);
7348}
7349
7352 MachineDominatorTree *MDT) const {
7353 MachineFunction &MF = *MI.getMF();
7354 MachineRegisterInfo &MRI = MF.getRegInfo();
7355 MachineBasicBlock *CreatedBB = nullptr;
7356
7357 // Legalize VOP2
7358 if (isVOP2(MI) || isVOPC(MI)) {
7360 return CreatedBB;
7361 }
7362
7363 // Legalize VOP3
7364 if (isVOP3(MI)) {
7366 return CreatedBB;
7367 }
7368
7369 // Legalize SMRD
7370 if (isSMRD(MI)) {
7372 return CreatedBB;
7373 }
7374
7375 // Legalize FLAT
7376 if (isFLAT(MI)) {
7378 return CreatedBB;
7379 }
7380
7381 // Legalize PHI
7382 // The register class of the operands must be the same type as the register
7383 // class of the output.
7384 if (MI.getOpcode() == AMDGPU::PHI) {
7385 const TargetRegisterClass *VRC = getOpRegClass(MI, 0);
7386 assert(!RI.isSGPRClass(VRC));
7387
7388 // Update all the operands so they have the same type.
7389 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
7390 MachineOperand &Op = MI.getOperand(I);
7391 if (!Op.isReg() || !Op.getReg().isVirtual())
7392 continue;
7393
7394 // MI is a PHI instruction.
7395 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
7397
7398 // Avoid creating no-op copies with the same src and dst reg class. These
7399 // confuse some of the machine passes.
7400 legalizeGenericOperand(*InsertBB, Insert, VRC, Op, MRI, MI.getDebugLoc());
7401 }
7402 }
7403
7404 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
7405 // VGPR dest type and SGPR sources, insert copies so all operands are
7406 // VGPRs. This seems to help operand folding / the register coalescer.
7407 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
7408 MachineBasicBlock *MBB = MI.getParent();
7409 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
7410 if (RI.hasVGPRs(DstRC)) {
7411 // Update all the operands so they are VGPR register classes. These may
7412 // not be the same register class because REG_SEQUENCE supports mixing
7413 // subregister index types e.g. sub0_sub1 + sub2 + sub3
7414 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
7415 MachineOperand &Op = MI.getOperand(I);
7416 if (!Op.isReg() || !Op.getReg().isVirtual())
7417 continue;
7418
7419 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
7420 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
7421 if (VRC == OpRC)
7422 continue;
7423
7424 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
7425 Op.setIsKill();
7426 }
7427 }
7428
7429 return CreatedBB;
7430 }
7431
7432 // Legalize INSERT_SUBREG
7433 // src0 must have the same register class as dst
7434 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
7435 Register Dst = MI.getOperand(0).getReg();
7436 Register Src0 = MI.getOperand(1).getReg();
7437 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
7438 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
7439 if (DstRC != Src0RC) {
7440 MachineBasicBlock *MBB = MI.getParent();
7441 MachineOperand &Op = MI.getOperand(1);
7442 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
7443 }
7444 return CreatedBB;
7445 }
7446
7447 // Legalize SI_INIT_M0
7448 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
7449 MachineOperand &Src = MI.getOperand(0);
7450 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7451 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
7452 return CreatedBB;
7453 }
7454
7455 // Legalize S_BITREPLICATE, S_QUADMASK and S_WQM
7456 if (MI.getOpcode() == AMDGPU::S_BITREPLICATE_B64_B32 ||
7457 MI.getOpcode() == AMDGPU::S_QUADMASK_B32 ||
7458 MI.getOpcode() == AMDGPU::S_QUADMASK_B64 ||
7459 MI.getOpcode() == AMDGPU::S_WQM_B32 ||
7460 MI.getOpcode() == AMDGPU::S_WQM_B64 ||
7461 MI.getOpcode() == AMDGPU::S_INVERSE_BALLOT_U32 ||
7462 MI.getOpcode() == AMDGPU::S_INVERSE_BALLOT_U64) {
7463 MachineOperand &Src = MI.getOperand(1);
7464 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7465 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
7466 return CreatedBB;
7467 }
7468
7469 // Legalize MIMG/VIMAGE/VSAMPLE and MUBUF/MTBUF for shaders.
7470 //
7471 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
7472 // scratch memory access. In both cases, the legalization never involves
7473 // conversion to the addr64 form.
7475 (isMUBUF(MI) || isMTBUF(MI)))) {
7476 AMDGPU::OpName RSrcOpName = (isVIMAGE(MI) || isVSAMPLE(MI))
7477 ? AMDGPU::OpName::rsrc
7478 : AMDGPU::OpName::srsrc;
7479 MachineOperand *SRsrc = getNamedOperand(MI, RSrcOpName);
7480 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
7481 CreatedBB = loadScalarOperandsFromVGPR(*this, MI, {SRsrc}, MDT);
7482
7483 AMDGPU::OpName SampOpName =
7484 isMIMG(MI) ? AMDGPU::OpName::ssamp : AMDGPU::OpName::samp;
7485 MachineOperand *SSamp = getNamedOperand(MI, SampOpName);
7486 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
7487 CreatedBB = loadScalarOperandsFromVGPR(*this, MI, {SSamp}, MDT);
7488
7489 return CreatedBB;
7490 }
7491
7492 // Legalize SI_CALL
7493 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
7494 MachineOperand *Dest = &MI.getOperand(0);
7495 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
7496 // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN and
7497 // following copies, we also need to move copies from and to physical
7498 // registers into the loop block.
7499 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
7500 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
7501
7502 // Also move the copies to physical registers into the loop block
7503 MachineBasicBlock &MBB = *MI.getParent();
7505 while (Start->getOpcode() != FrameSetupOpcode)
7506 --Start;
7508 while (End->getOpcode() != FrameDestroyOpcode)
7509 ++End;
7510 // Also include following copies of the return value
7511 ++End;
7512 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
7513 MI.definesRegister(End->getOperand(1).getReg(), /*TRI=*/nullptr))
7514 ++End;
7515 CreatedBB =
7516 loadScalarOperandsFromVGPR(*this, MI, {Dest}, MDT, Start, End);
7517 }
7518 }
7519
7520 // Legalize s_sleep_var.
7521 if (MI.getOpcode() == AMDGPU::S_SLEEP_VAR) {
7522 const DebugLoc &DL = MI.getDebugLoc();
7523 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
7524 int Src0Idx =
7525 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
7526 MachineOperand &Src0 = MI.getOperand(Src0Idx);
7527 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
7528 .add(Src0);
7529 Src0.ChangeToRegister(Reg, false);
7530 return nullptr;
7531 }
7532
7533 // Legalize TENSOR_LOAD_TO_LDS_d2/_d4, TENSOR_STORE_FROM_LDS_d2/_d4. All their
7534 // operands are scalar.
7535 if (MI.getOpcode() == AMDGPU::TENSOR_LOAD_TO_LDS_d2 ||
7536 MI.getOpcode() == AMDGPU::TENSOR_LOAD_TO_LDS_d4 ||
7537 MI.getOpcode() == AMDGPU::TENSOR_STORE_FROM_LDS_d2 ||
7538 MI.getOpcode() == AMDGPU::TENSOR_STORE_FROM_LDS_d4) {
7539 for (MachineOperand &Src : MI.explicit_operands()) {
7540 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
7541 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
7542 }
7543 return CreatedBB;
7544 }
7545
7546 // Legalize MUBUF instructions.
7547 bool isSoffsetLegal = true;
7548 int SoffsetIdx =
7549 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::soffset);
7550 if (SoffsetIdx != -1) {
7551 MachineOperand *Soffset = &MI.getOperand(SoffsetIdx);
7552 if (Soffset->isReg() && Soffset->getReg().isVirtual() &&
7553 !RI.isSGPRClass(MRI.getRegClass(Soffset->getReg()))) {
7554 isSoffsetLegal = false;
7555 }
7556 }
7557
7558 bool isRsrcLegal = true;
7559 int RsrcIdx =
7560 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
7561 if (RsrcIdx != -1) {
7562 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
7563 if (Rsrc->isReg() && !RI.isSGPRReg(MRI, Rsrc->getReg()))
7564 isRsrcLegal = false;
7565 }
7566
7567 // The operands are legal.
7568 if (isRsrcLegal && isSoffsetLegal)
7569 return CreatedBB;
7570
7571 if (!isRsrcLegal) {
7572 // Legalize a VGPR Rsrc
7573 //
7574 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
7575 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
7576 // a zero-value SRsrc.
7577 //
7578 // If the instruction is _OFFSET (both idxen and offen disabled), and we
7579 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
7580 // above.
7581 //
7582 // Otherwise we are on non-ADDR64 hardware, and/or we have
7583 // idxen/offen/bothen and we fall back to a waterfall loop.
7584
7585 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
7586 MachineBasicBlock &MBB = *MI.getParent();
7587
7588 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
7589 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
7590 // This is already an ADDR64 instruction so we need to add the pointer
7591 // extracted from the resource descriptor to the current value of VAddr.
7592 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7593 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7594 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
7595
7596 const auto *BoolXExecRC = RI.getWaveMaskRegClass();
7597 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
7598 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
7599
7600 unsigned RsrcPtr, NewSRsrc;
7601 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
7602
7603 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
7604 const DebugLoc &DL = MI.getDebugLoc();
7605 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
7606 .addDef(CondReg0)
7607 .addReg(RsrcPtr, {}, AMDGPU::sub0)
7608 .addReg(VAddr->getReg(), {}, AMDGPU::sub0)
7609 .addImm(0);
7610
7611 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
7612 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
7613 .addDef(CondReg1, RegState::Dead)
7614 .addReg(RsrcPtr, {}, AMDGPU::sub1)
7615 .addReg(VAddr->getReg(), {}, AMDGPU::sub1)
7616 .addReg(CondReg0, RegState::Kill)
7617 .addImm(0);
7618
7619 // NewVaddr = {NewVaddrHi, NewVaddrLo}
7620 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
7621 .addReg(NewVAddrLo)
7622 .addImm(AMDGPU::sub0)
7623 .addReg(NewVAddrHi)
7624 .addImm(AMDGPU::sub1);
7625
7626 VAddr->setReg(NewVAddr);
7627 Rsrc->setReg(NewSRsrc);
7628 } else if (!VAddr && ST.hasAddr64()) {
7629 // This instruction is the _OFFSET variant, so we need to convert it to
7630 // ADDR64.
7631 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
7632 "FIXME: Need to emit flat atomics here");
7633
7634 unsigned RsrcPtr, NewSRsrc;
7635 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
7636
7637 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
7638 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
7639 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
7640 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
7641 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
7642
7643 // Atomics with return have an additional tied operand and are
7644 // missing some of the special bits.
7645 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
7646 MachineInstr *Addr64;
7647
7648 if (!VDataIn) {
7649 // Regular buffer load / store.
7651 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
7652 .add(*VData)
7653 .addReg(NewVAddr)
7654 .addReg(NewSRsrc)
7655 .add(*SOffset)
7656 .add(*Offset);
7657
7658 if (const MachineOperand *CPol =
7659 getNamedOperand(MI, AMDGPU::OpName::cpol)) {
7660 MIB.addImm(CPol->getImm());
7661 }
7662
7663 if (const MachineOperand *TFE =
7664 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
7665 MIB.addImm(TFE->getImm());
7666 }
7667
7668 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
7669
7670 MIB.cloneMemRefs(MI);
7671 Addr64 = MIB;
7672 } else {
7673 // Atomics with return.
7674 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
7675 .add(*VData)
7676 .add(*VDataIn)
7677 .addReg(NewVAddr)
7678 .addReg(NewSRsrc)
7679 .add(*SOffset)
7680 .add(*Offset)
7681 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::cpol))
7682 .cloneMemRefs(MI);
7683 }
7684
7685 MI.removeFromParent();
7686
7687 // NewVaddr = {NewVaddrHi, NewVaddrLo}
7688 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
7689 NewVAddr)
7690 .addReg(RsrcPtr, {}, AMDGPU::sub0)
7691 .addImm(AMDGPU::sub0)
7692 .addReg(RsrcPtr, {}, AMDGPU::sub1)
7693 .addImm(AMDGPU::sub1);
7694 } else {
7695 // Legalize a VGPR Rsrc and soffset together.
7696 if (!isSoffsetLegal) {
7697 MachineOperand *Soffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
7698 CreatedBB = loadScalarOperandsFromVGPR(*this, MI, {Rsrc, Soffset}, MDT);
7699 return CreatedBB;
7700 }
7701 CreatedBB = loadScalarOperandsFromVGPR(*this, MI, {Rsrc}, MDT);
7702 return CreatedBB;
7703 }
7704 }
7705
7706 // Legalize a VGPR soffset.
7707 if (!isSoffsetLegal) {
7708 MachineOperand *Soffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
7709 CreatedBB = loadScalarOperandsFromVGPR(*this, MI, {Soffset}, MDT);
7710 return CreatedBB;
7711 }
7712 return CreatedBB;
7713}
7714
7716 InstrList.insert(MI);
7717 // Add MBUF instructions to the deferred list.
7718 int RsrcIdx =
7719 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
7720 if (RsrcIdx != -1) {
7721 DeferredList.insert(MI);
7722 }
7723}
7724
7726 return DeferredList.contains(MI);
7727}
7728
7729// Legalize size mismatches between 16bit and 32bit registers in v2s copy
7730// lowering (change sgpr to vgpr).
7731// This is mainly caused by 16bit SALU and 16bit VALU using reg with different
7732// size. Need to legalize the size of the operands during the vgpr lowering
7733// chain. This can be removed after we have sgpr16 in place
7735 MachineRegisterInfo &MRI) const {
7736 if (!ST.useRealTrue16Insts())
7737 return;
7738
7739 unsigned Opcode = MI.getOpcode();
7740 MachineBasicBlock *MBB = MI.getParent();
7741 // Legalize operands and check for size mismatch
7742 if (!OpIdx || OpIdx >= MI.getNumExplicitOperands() ||
7743 OpIdx >= get(Opcode).getNumOperands() ||
7744 get(Opcode).operands()[OpIdx].RegClass == -1)
7745 return;
7746
7747 MachineOperand &Op = MI.getOperand(OpIdx);
7748 if (!Op.isReg() || !Op.getReg().isVirtual())
7749 return;
7750
7751 const TargetRegisterClass *CurrRC = MRI.getRegClass(Op.getReg());
7752 if (!RI.isVGPRClass(CurrRC))
7753 return;
7754
7755 int16_t RCID = getOpRegClassID(get(Opcode).operands()[OpIdx]);
7756 const TargetRegisterClass *ExpectedRC = RI.getRegClass(RCID);
7757 if (RI.getMatchingSuperRegClass(CurrRC, ExpectedRC, AMDGPU::lo16)) {
7758 Op.setSubReg(AMDGPU::lo16);
7759 } else if (RI.getMatchingSuperRegClass(ExpectedRC, CurrRC, AMDGPU::lo16)) {
7760 const DebugLoc &DL = MI.getDebugLoc();
7761 Register NewDstReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
7762 Register Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_16RegClass);
7763 BuildMI(*MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), Undef);
7764 BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewDstReg)
7765 .addReg(Op.getReg())
7766 .addImm(AMDGPU::lo16)
7767 .addReg(Undef)
7768 .addImm(AMDGPU::hi16);
7769 Op.setReg(NewDstReg);
7770 }
7771}
7773 MachineRegisterInfo &MRI) const {
7774 for (unsigned OpIdx = 1; OpIdx < MI.getNumExplicitOperands(); OpIdx++)
7776}
7777
7779 MachineDominatorTree *MDT) const {
7780
7781 while (!Worklist.empty()) {
7782 MachineInstr &Inst = *Worklist.top();
7783 Worklist.erase_top();
7784 // Skip MachineInstr in the deferred list.
7785 if (Worklist.isDeferred(&Inst))
7786 continue;
7787 moveToVALUImpl(Worklist, MDT, Inst);
7788 }
7789
7790 // Deferred list of instructions will be processed once
7791 // all the MachineInstr in the worklist are done.
7792 for (MachineInstr *Inst : Worklist.getDeferredList()) {
7793 moveToVALUImpl(Worklist, MDT, *Inst);
7794 assert(Worklist.empty() &&
7795 "Deferred MachineInstr are not supposed to re-populate worklist");
7796 }
7797}
7798
7801 MachineInstr &Inst) const {
7802
7804 if (!MBB)
7805 return;
7806 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
7807 unsigned Opcode = Inst.getOpcode();
7808 unsigned NewOpcode = getVALUOp(Inst);
7809 const DebugLoc &DL = Inst.getDebugLoc();
7810
7811 // Handle some special cases
7812 switch (Opcode) {
7813 default:
7814 break;
7815 case AMDGPU::S_ADD_I32:
7816 case AMDGPU::S_SUB_I32: {
7817 // FIXME: The u32 versions currently selected use the carry.
7818 bool Changed;
7819 MachineBasicBlock *CreatedBBTmp = nullptr;
7820 std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT);
7821 if (Changed)
7822 return;
7823
7824 // Default handling
7825 break;
7826 }
7827
7828 case AMDGPU::S_MUL_U64:
7829 if (ST.hasVectorMulU64()) {
7830 NewOpcode = AMDGPU::V_MUL_U64_e64;
7831 break;
7832 }
7833 // Split s_mul_u64 in 32-bit vector multiplications.
7834 splitScalarSMulU64(Worklist, Inst, MDT);
7835 Inst.eraseFromParent();
7836 return;
7837
7838 case AMDGPU::S_MUL_U64_U32_PSEUDO:
7839 case AMDGPU::S_MUL_I64_I32_PSEUDO:
7840 // This is a special case of s_mul_u64 where all the operands are either
7841 // zero extended or sign extended.
7842 splitScalarSMulPseudo(Worklist, Inst, MDT);
7843 Inst.eraseFromParent();
7844 return;
7845
7846 case AMDGPU::S_AND_B64:
7847 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
7848 Inst.eraseFromParent();
7849 return;
7850
7851 case AMDGPU::S_OR_B64:
7852 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
7853 Inst.eraseFromParent();
7854 return;
7855
7856 case AMDGPU::S_XOR_B64:
7857 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
7858 Inst.eraseFromParent();
7859 return;
7860
7861 case AMDGPU::S_NAND_B64:
7862 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
7863 Inst.eraseFromParent();
7864 return;
7865
7866 case AMDGPU::S_NOR_B64:
7867 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
7868 Inst.eraseFromParent();
7869 return;
7870
7871 case AMDGPU::S_XNOR_B64:
7872 if (ST.hasDLInsts())
7873 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
7874 else
7875 splitScalar64BitXnor(Worklist, Inst, MDT);
7876 Inst.eraseFromParent();
7877 return;
7878
7879 case AMDGPU::S_ANDN2_B64:
7880 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
7881 Inst.eraseFromParent();
7882 return;
7883
7884 case AMDGPU::S_ORN2_B64:
7885 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
7886 Inst.eraseFromParent();
7887 return;
7888
7889 case AMDGPU::S_BREV_B64:
7890 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true);
7891 Inst.eraseFromParent();
7892 return;
7893
7894 case AMDGPU::S_NOT_B64:
7895 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
7896 Inst.eraseFromParent();
7897 return;
7898
7899 case AMDGPU::S_BCNT1_I32_B64:
7900 splitScalar64BitBCNT(Worklist, Inst);
7901 Inst.eraseFromParent();
7902 return;
7903
7904 case AMDGPU::S_BFE_I64:
7905 splitScalar64BitBFE(Worklist, Inst);
7906 Inst.eraseFromParent();
7907 return;
7908
7909 case AMDGPU::S_FLBIT_I32_B64:
7910 splitScalar64BitCountOp(Worklist, Inst, AMDGPU::V_FFBH_U32_e32);
7911 Inst.eraseFromParent();
7912 return;
7913 case AMDGPU::S_FF1_I32_B64:
7914 splitScalar64BitCountOp(Worklist, Inst, AMDGPU::V_FFBL_B32_e32);
7915 Inst.eraseFromParent();
7916 return;
7917
7918 case AMDGPU::S_LSHL_B32:
7919 if (ST.hasOnlyRevVALUShifts()) {
7920 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
7921 swapOperands(Inst);
7922 }
7923 break;
7924 case AMDGPU::S_ASHR_I32:
7925 if (ST.hasOnlyRevVALUShifts()) {
7926 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
7927 swapOperands(Inst);
7928 }
7929 break;
7930 case AMDGPU::S_LSHR_B32:
7931 if (ST.hasOnlyRevVALUShifts()) {
7932 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
7933 swapOperands(Inst);
7934 }
7935 break;
7936 case AMDGPU::S_LSHL_B64:
7937 if (ST.hasOnlyRevVALUShifts()) {
7938 NewOpcode = ST.getGeneration() >= AMDGPUSubtarget::GFX12
7939 ? AMDGPU::V_LSHLREV_B64_pseudo_e64
7940 : AMDGPU::V_LSHLREV_B64_e64;
7941 swapOperands(Inst);
7942 }
7943 break;
7944 case AMDGPU::S_ASHR_I64:
7945 if (ST.hasOnlyRevVALUShifts()) {
7946 NewOpcode = AMDGPU::V_ASHRREV_I64_e64;
7947 swapOperands(Inst);
7948 }
7949 break;
7950 case AMDGPU::S_LSHR_B64:
7951 if (ST.hasOnlyRevVALUShifts()) {
7952 NewOpcode = AMDGPU::V_LSHRREV_B64_e64;
7953 swapOperands(Inst);
7954 }
7955 break;
7956
7957 case AMDGPU::S_ABS_I32:
7958 lowerScalarAbs(Worklist, Inst);
7959 Inst.eraseFromParent();
7960 return;
7961
7962 case AMDGPU::S_ABSDIFF_I32:
7963 lowerScalarAbsDiff(Worklist, Inst);
7964 Inst.eraseFromParent();
7965 return;
7966
7967 case AMDGPU::S_CBRANCH_SCC0:
7968 case AMDGPU::S_CBRANCH_SCC1: {
7969 // Clear unused bits of vcc
7970 Register CondReg = Inst.getOperand(1).getReg();
7971 bool IsSCC = CondReg == AMDGPU::SCC;
7973 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(LMC.AndOpc), LMC.VccReg)
7974 .addReg(LMC.ExecReg)
7975 .addReg(IsSCC ? LMC.VccReg : CondReg);
7976 Inst.removeOperand(1);
7977 } break;
7978
7979 case AMDGPU::S_BFE_U64:
7980 case AMDGPU::S_BFM_B64:
7981 llvm_unreachable("Moving this op to VALU not implemented");
7982
7983 case AMDGPU::S_PACK_LL_B32_B16:
7984 case AMDGPU::S_PACK_LH_B32_B16:
7985 case AMDGPU::S_PACK_HL_B32_B16:
7986 case AMDGPU::S_PACK_HH_B32_B16:
7987 movePackToVALU(Worklist, MRI, Inst);
7988 Inst.eraseFromParent();
7989 return;
7990
7991 case AMDGPU::S_XNOR_B32:
7992 lowerScalarXnor(Worklist, Inst);
7993 Inst.eraseFromParent();
7994 return;
7995
7996 case AMDGPU::S_NAND_B32:
7997 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
7998 Inst.eraseFromParent();
7999 return;
8000
8001 case AMDGPU::S_NOR_B32:
8002 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
8003 Inst.eraseFromParent();
8004 return;
8005
8006 case AMDGPU::S_ANDN2_B32:
8007 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
8008 Inst.eraseFromParent();
8009 return;
8010
8011 case AMDGPU::S_ORN2_B32:
8012 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
8013 Inst.eraseFromParent();
8014 return;
8015
8016 // TODO: remove as soon as everything is ready
8017 // to replace VGPR to SGPR copy with V_READFIRSTLANEs.
8018 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO
8019 // can only be selected from the uniform SDNode.
8020 case AMDGPU::S_ADD_CO_PSEUDO:
8021 case AMDGPU::S_SUB_CO_PSEUDO: {
8022 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
8023 ? AMDGPU::V_ADDC_U32_e64
8024 : AMDGPU::V_SUBB_U32_e64;
8025 const auto *CarryRC = RI.getWaveMaskRegClass();
8026
8027 Register CarryInReg = Inst.getOperand(4).getReg();
8028 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) {
8029 Register NewCarryReg = MRI.createVirtualRegister(CarryRC);
8030 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg)
8031 .addReg(CarryInReg);
8032 }
8033
8034 Register CarryOutReg = Inst.getOperand(1).getReg();
8035
8036 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass(
8037 MRI.getRegClass(Inst.getOperand(0).getReg())));
8038 MachineInstr *CarryOp =
8039 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg)
8040 .addReg(CarryOutReg, RegState::Define)
8041 .add(Inst.getOperand(2))
8042 .add(Inst.getOperand(3))
8043 .addReg(CarryInReg)
8044 .addImm(0);
8045 legalizeOperands(*CarryOp);
8046 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg);
8047 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
8048 Inst.eraseFromParent();
8049 }
8050 return;
8051 case AMDGPU::S_UADDO_PSEUDO:
8052 case AMDGPU::S_USUBO_PSEUDO: {
8053 MachineOperand &Dest0 = Inst.getOperand(0);
8054 MachineOperand &Dest1 = Inst.getOperand(1);
8055 MachineOperand &Src0 = Inst.getOperand(2);
8056 MachineOperand &Src1 = Inst.getOperand(3);
8057
8058 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
8059 ? AMDGPU::V_ADD_CO_U32_e64
8060 : AMDGPU::V_SUB_CO_U32_e64;
8061 const TargetRegisterClass *NewRC =
8062 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg()));
8063 Register DestReg = MRI.createVirtualRegister(NewRC);
8064 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg)
8065 .addReg(Dest1.getReg(), RegState::Define)
8066 .add(Src0)
8067 .add(Src1)
8068 .addImm(0); // clamp bit
8069
8070 legalizeOperands(*NewInstr, MDT);
8071 MRI.replaceRegWith(Dest0.getReg(), DestReg);
8072 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
8073 Inst.eraseFromParent();
8074 }
8075 return;
8076 case AMDGPU::S_LSHL1_ADD_U32:
8077 case AMDGPU::S_LSHL2_ADD_U32:
8078 case AMDGPU::S_LSHL3_ADD_U32:
8079 case AMDGPU::S_LSHL4_ADD_U32: {
8080 MachineOperand &Dest = Inst.getOperand(0);
8081 MachineOperand &Src0 = Inst.getOperand(1);
8082 MachineOperand &Src1 = Inst.getOperand(2);
8083 unsigned ShiftAmt = (Opcode == AMDGPU::S_LSHL1_ADD_U32 ? 1
8084 : Opcode == AMDGPU::S_LSHL2_ADD_U32 ? 2
8085 : Opcode == AMDGPU::S_LSHL3_ADD_U32 ? 3
8086 : 4);
8087
8088 const TargetRegisterClass *NewRC =
8089 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest.getReg()));
8090 Register DestReg = MRI.createVirtualRegister(NewRC);
8091 MachineInstr *NewInstr =
8092 BuildMI(*MBB, &Inst, DL, get(AMDGPU::V_LSHL_ADD_U32_e64), DestReg)
8093 .add(Src0)
8094 .addImm(ShiftAmt)
8095 .add(Src1);
8096
8097 legalizeOperands(*NewInstr, MDT);
8098 MRI.replaceRegWith(Dest.getReg(), DestReg);
8099 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
8100 Inst.eraseFromParent();
8101 }
8102 return;
8103 case AMDGPU::S_CSELECT_B32:
8104 case AMDGPU::S_CSELECT_B64:
8105 lowerSelect(Worklist, Inst, MDT);
8106 Inst.eraseFromParent();
8107 return;
8108 case AMDGPU::S_CMP_EQ_I32:
8109 case AMDGPU::S_CMP_LG_I32:
8110 case AMDGPU::S_CMP_GT_I32:
8111 case AMDGPU::S_CMP_GE_I32:
8112 case AMDGPU::S_CMP_LT_I32:
8113 case AMDGPU::S_CMP_LE_I32:
8114 case AMDGPU::S_CMP_EQ_U32:
8115 case AMDGPU::S_CMP_LG_U32:
8116 case AMDGPU::S_CMP_GT_U32:
8117 case AMDGPU::S_CMP_GE_U32:
8118 case AMDGPU::S_CMP_LT_U32:
8119 case AMDGPU::S_CMP_LE_U32:
8120 case AMDGPU::S_CMP_EQ_U64:
8121 case AMDGPU::S_CMP_LG_U64:
8122 case AMDGPU::S_CMP_LT_F32:
8123 case AMDGPU::S_CMP_EQ_F32:
8124 case AMDGPU::S_CMP_LE_F32:
8125 case AMDGPU::S_CMP_GT_F32:
8126 case AMDGPU::S_CMP_LG_F32:
8127 case AMDGPU::S_CMP_GE_F32:
8128 case AMDGPU::S_CMP_O_F32:
8129 case AMDGPU::S_CMP_U_F32:
8130 case AMDGPU::S_CMP_NGE_F32:
8131 case AMDGPU::S_CMP_NLG_F32:
8132 case AMDGPU::S_CMP_NGT_F32:
8133 case AMDGPU::S_CMP_NLE_F32:
8134 case AMDGPU::S_CMP_NEQ_F32:
8135 case AMDGPU::S_CMP_NLT_F32: {
8136 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass());
8137 auto NewInstr =
8138 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(NewOpcode), CondReg)
8139 .setMIFlags(Inst.getFlags());
8140 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src0_modifiers) >=
8141 0) {
8142 NewInstr
8143 .addImm(0) // src0_modifiers
8144 .add(Inst.getOperand(0)) // src0
8145 .addImm(0) // src1_modifiers
8146 .add(Inst.getOperand(1)) // src1
8147 .addImm(0); // clamp
8148 } else {
8149 NewInstr.add(Inst.getOperand(0)).add(Inst.getOperand(1));
8150 }
8151 legalizeOperands(*NewInstr, MDT);
8152 int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC, /*TRI=*/nullptr);
8153 const MachineOperand &SCCOp = Inst.getOperand(SCCIdx);
8154 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
8155 Inst.eraseFromParent();
8156 return;
8157 }
8158 case AMDGPU::S_CMP_LT_F16:
8159 case AMDGPU::S_CMP_EQ_F16:
8160 case AMDGPU::S_CMP_LE_F16:
8161 case AMDGPU::S_CMP_GT_F16:
8162 case AMDGPU::S_CMP_LG_F16:
8163 case AMDGPU::S_CMP_GE_F16:
8164 case AMDGPU::S_CMP_O_F16:
8165 case AMDGPU::S_CMP_U_F16:
8166 case AMDGPU::S_CMP_NGE_F16:
8167 case AMDGPU::S_CMP_NLG_F16:
8168 case AMDGPU::S_CMP_NGT_F16:
8169 case AMDGPU::S_CMP_NLE_F16:
8170 case AMDGPU::S_CMP_NEQ_F16:
8171 case AMDGPU::S_CMP_NLT_F16: {
8172 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass());
8173 auto NewInstr =
8174 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(NewOpcode), CondReg)
8175 .setMIFlags(Inst.getFlags());
8176 if (AMDGPU::hasNamedOperand(NewOpcode, AMDGPU::OpName::src0_modifiers)) {
8177 NewInstr
8178 .addImm(0) // src0_modifiers
8179 .add(Inst.getOperand(0)) // src0
8180 .addImm(0) // src1_modifiers
8181 .add(Inst.getOperand(1)) // src1
8182 .addImm(0); // clamp
8183 if (AMDGPU::hasNamedOperand(NewOpcode, AMDGPU::OpName::op_sel))
8184 NewInstr.addImm(0); // op_sel0
8185 } else {
8186 NewInstr
8187 .add(Inst.getOperand(0))
8188 .add(Inst.getOperand(1));
8189 }
8190 legalizeOperandsVALUt16(*NewInstr, MRI);
8191 legalizeOperands(*NewInstr, MDT);
8192 int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC, /*TRI=*/nullptr);
8193 const MachineOperand &SCCOp = Inst.getOperand(SCCIdx);
8194 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
8195 Inst.eraseFromParent();
8196 return;
8197 }
8198 case AMDGPU::S_CVT_HI_F32_F16: {
8199 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8200 Register NewDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8201 if (ST.useRealTrue16Insts()) {
8202 BuildMI(*MBB, Inst, DL, get(AMDGPU::COPY), TmpReg)
8203 .add(Inst.getOperand(1));
8204 BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
8205 .addImm(0) // src0_modifiers
8206 .addReg(TmpReg, {}, AMDGPU::hi16)
8207 .addImm(0) // clamp
8208 .addImm(0) // omod
8209 .addImm(0); // op_sel0
8210 } else {
8211 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
8212 .addImm(16)
8213 .add(Inst.getOperand(1));
8214 BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
8215 .addImm(0) // src0_modifiers
8216 .addReg(TmpReg)
8217 .addImm(0) // clamp
8218 .addImm(0); // omod
8219 }
8220
8221 MRI.replaceRegWith(Inst.getOperand(0).getReg(), NewDst);
8222 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8223 Inst.eraseFromParent();
8224 return;
8225 }
8226 case AMDGPU::S_MINIMUM_F32:
8227 case AMDGPU::S_MAXIMUM_F32: {
8228 Register NewDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8229 MachineInstr *NewInstr = BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
8230 .addImm(0) // src0_modifiers
8231 .add(Inst.getOperand(1))
8232 .addImm(0) // src1_modifiers
8233 .add(Inst.getOperand(2))
8234 .addImm(0) // clamp
8235 .addImm(0); // omod
8236 MRI.replaceRegWith(Inst.getOperand(0).getReg(), NewDst);
8237
8238 legalizeOperands(*NewInstr, MDT);
8239 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8240 Inst.eraseFromParent();
8241 return;
8242 }
8243 case AMDGPU::S_MINIMUM_F16:
8244 case AMDGPU::S_MAXIMUM_F16: {
8245 Register NewDst = MRI.createVirtualRegister(ST.useRealTrue16Insts()
8246 ? &AMDGPU::VGPR_16RegClass
8247 : &AMDGPU::VGPR_32RegClass);
8248 MachineInstr *NewInstr = BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
8249 .addImm(0) // src0_modifiers
8250 .add(Inst.getOperand(1))
8251 .addImm(0) // src1_modifiers
8252 .add(Inst.getOperand(2))
8253 .addImm(0) // clamp
8254 .addImm(0) // omod
8255 .addImm(0); // opsel0
8256 MRI.replaceRegWith(Inst.getOperand(0).getReg(), NewDst);
8257 legalizeOperandsVALUt16(*NewInstr, MRI);
8258 legalizeOperands(*NewInstr, MDT);
8259 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8260 Inst.eraseFromParent();
8261 return;
8262 }
8263 case AMDGPU::V_S_EXP_F16_e64:
8264 case AMDGPU::V_S_LOG_F16_e64:
8265 case AMDGPU::V_S_RCP_F16_e64:
8266 case AMDGPU::V_S_RSQ_F16_e64:
8267 case AMDGPU::V_S_SQRT_F16_e64: {
8268 Register NewDst = MRI.createVirtualRegister(ST.useRealTrue16Insts()
8269 ? &AMDGPU::VGPR_16RegClass
8270 : &AMDGPU::VGPR_32RegClass);
8271 auto NewInstr = BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
8272 .add(Inst.getOperand(1)) // src0_modifiers
8273 .add(Inst.getOperand(2))
8274 .add(Inst.getOperand(3)) // clamp
8275 .add(Inst.getOperand(4)) // omod
8276 .setMIFlags(Inst.getFlags());
8277 if (AMDGPU::hasNamedOperand(NewOpcode, AMDGPU::OpName::op_sel))
8278 NewInstr.addImm(0); // opsel0
8279 MRI.replaceRegWith(Inst.getOperand(0).getReg(), NewDst);
8280 legalizeOperandsVALUt16(*NewInstr, MRI);
8281 legalizeOperands(*NewInstr, MDT);
8282 addUsersToMoveToVALUWorklist(NewDst, MRI, Worklist);
8283 Inst.eraseFromParent();
8284 return;
8285 }
8286 }
8287
8288 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
8289 // We cannot move this instruction to the VALU, so we should try to
8290 // legalize its operands instead.
8291 legalizeOperands(Inst, MDT);
8292 return;
8293 }
8294 // Handle converting generic instructions like COPY-to-SGPR into
8295 // COPY-to-VGPR.
8296 if (NewOpcode == Opcode) {
8297 Register DstReg = Inst.getOperand(0).getReg();
8298 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
8299
8300 // If it's a copy of a VGPR to a physical SGPR, insert a V_READFIRSTLANE and
8301 // hope for the best.
8302 if (Inst.isCopy() && DstReg.isPhysical() &&
8303 RI.isVGPR(MRI, Inst.getOperand(1).getReg())) {
8304 Register NewDst = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
8305 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(),
8306 get(AMDGPU::V_READFIRSTLANE_B32), NewDst)
8307 .add(Inst.getOperand(1));
8308 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY),
8309 DstReg)
8310 .addReg(NewDst);
8311
8312 Inst.eraseFromParent();
8313 return;
8314 }
8315
8316 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual()) {
8317 Register NewDstReg = Inst.getOperand(1).getReg();
8318 const TargetRegisterClass *SrcRC = RI.getRegClassForReg(MRI, NewDstReg);
8319 if (const TargetRegisterClass *CommonRC =
8320 RI.getCommonSubClass(NewDstRC, SrcRC)) {
8321 // Instead of creating a copy where src and dst are the same register
8322 // class, we just replace all uses of dst with src. These kinds of
8323 // copies interfere with the heuristics MachineSink uses to decide
8324 // whether or not to split a critical edge. Since the pass assumes
8325 // that copies will end up as machine instructions and not be
8326 // eliminated.
8327 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
8328 MRI.replaceRegWith(DstReg, NewDstReg);
8329 MRI.clearKillFlags(NewDstReg);
8330 Inst.getOperand(0).setReg(DstReg);
8331
8332 if (!MRI.constrainRegClass(NewDstReg, CommonRC))
8333 llvm_unreachable("failed to constrain register");
8334
8335 Inst.eraseFromParent();
8336
8337 for (MachineOperand &UseMO :
8338 make_early_inc_range(MRI.use_operands(NewDstReg))) {
8339 MachineInstr &UseMI = *UseMO.getParent();
8340
8341 // Legalize t16 operands since replaceReg is called after
8342 // addUsersToVALU.
8344
8345 unsigned OpIdx = UseMI.getOperandNo(&UseMO);
8346 if (const TargetRegisterClass *OpRC =
8347 getRegClass(UseMI.getDesc(), OpIdx))
8348 MRI.constrainRegClass(NewDstReg, OpRC);
8349 }
8350
8351 return;
8352 }
8353 }
8354
8355 // If this is a v2s copy between a 16bit and a 32bit reg,
8356 // replace the vgpr copy with a reg_sequence/extract_subreg.
8357 // This can be removed after we have sgpr16 in place.
8358 if (ST.useRealTrue16Insts() && Inst.isCopy() &&
8359 Inst.getOperand(1).getReg().isVirtual() &&
8360 RI.isVGPR(MRI, Inst.getOperand(1).getReg())) {
8361 const TargetRegisterClass *SrcRegRC = getOpRegClass(Inst, 1);
8362 if (RI.getMatchingSuperRegClass(NewDstRC, SrcRegRC, AMDGPU::lo16)) {
8363 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8364 Register Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_16RegClass);
8365 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(),
8366 get(AMDGPU::IMPLICIT_DEF), Undef);
8367 BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(),
8368 get(AMDGPU::REG_SEQUENCE), NewDstReg)
8369 .addReg(Inst.getOperand(1).getReg())
8370 .addImm(AMDGPU::lo16)
8371 .addReg(Undef)
8372 .addImm(AMDGPU::hi16);
8373 Inst.eraseFromParent();
8374 MRI.replaceRegWith(DstReg, NewDstReg);
8375 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8376 return;
8377 } else if (RI.getMatchingSuperRegClass(SrcRegRC, NewDstRC,
8378 AMDGPU::lo16)) {
8379 Inst.getOperand(1).setSubReg(AMDGPU::lo16);
8380 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8381 MRI.replaceRegWith(DstReg, NewDstReg);
8382 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8383 return;
8384 }
8385 }
8386
8387 Register NewDstReg = MRI.createVirtualRegister(NewDstRC);
8388 MRI.replaceRegWith(DstReg, NewDstReg);
8389 legalizeOperands(Inst, MDT);
8390 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8391 return;
8392 }
8393
8394 // Use the new VALU Opcode.
8395 auto NewInstr = BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(NewOpcode))
8396 .setMIFlags(Inst.getFlags());
8397 if (isVOP3(NewOpcode) && !isVOP3(Opcode)) {
8398 // Intersperse VOP3 modifiers among the SALU operands.
8399 NewInstr->addOperand(Inst.getOperand(0));
8400 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8401 AMDGPU::OpName::src0_modifiers) >= 0)
8402 NewInstr.addImm(0);
8403 if (AMDGPU::hasNamedOperand(NewOpcode, AMDGPU::OpName::src0)) {
8404 const MachineOperand &Src = Inst.getOperand(1);
8405 NewInstr->addOperand(Src);
8406 }
8407
8408 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
8409 // We are converting these to a BFE, so we need to add the missing
8410 // operands for the size and offset.
8411 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
8412 NewInstr.addImm(0);
8413 NewInstr.addImm(Size);
8414 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
8415 // The VALU version adds the second operand to the result, so insert an
8416 // extra 0 operand.
8417 NewInstr.addImm(0);
8418 } else if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
8419 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
8420 // If we need to move this to VGPRs, we need to unpack the second
8421 // operand back into the 2 separate ones for bit offset and width.
8422 assert(OffsetWidthOp.isImm() &&
8423 "Scalar BFE is only implemented for constant width and offset");
8424 uint32_t Imm = OffsetWidthOp.getImm();
8425
8426 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
8427 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
8428 NewInstr.addImm(Offset);
8429 NewInstr.addImm(BitWidth);
8430 } else {
8431 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8432 AMDGPU::OpName::src1_modifiers) >= 0)
8433 NewInstr.addImm(0);
8434 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src1) >= 0)
8435 NewInstr->addOperand(Inst.getOperand(2));
8436 if (AMDGPU::getNamedOperandIdx(NewOpcode,
8437 AMDGPU::OpName::src2_modifiers) >= 0)
8438 NewInstr.addImm(0);
8439 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::src2) >= 0)
8440 NewInstr->addOperand(Inst.getOperand(3));
8441 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::clamp) >= 0)
8442 NewInstr.addImm(0);
8443 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::omod) >= 0)
8444 NewInstr.addImm(0);
8445 if (AMDGPU::getNamedOperandIdx(NewOpcode, AMDGPU::OpName::op_sel) >= 0)
8446 NewInstr.addImm(0);
8447 }
8448 } else {
8449 // Just copy the SALU operands.
8450 for (const MachineOperand &Op : Inst.explicit_operands())
8451 NewInstr->addOperand(Op);
8452 }
8453
8454 // Remove any references to SCC. Vector instructions can't read from it, and
8455 // we're just about to add the implicit use / defs of VCC, and we don't want
8456 // both.
8457 for (MachineOperand &Op : Inst.implicit_operands()) {
8458 if (Op.getReg() == AMDGPU::SCC) {
8459 // Only propagate through live-def of SCC.
8460 if (Op.isDef() && !Op.isDead())
8461 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
8462 if (Op.isUse())
8463 addSCCDefsToVALUWorklist(NewInstr, Worklist);
8464 }
8465 }
8466 Inst.eraseFromParent();
8467 Register NewDstReg;
8468 if (NewInstr->getOperand(0).isReg() && NewInstr->getOperand(0).isDef()) {
8469 Register DstReg = NewInstr->getOperand(0).getReg();
8470 assert(DstReg.isVirtual());
8471 // Update the destination register class.
8472 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(*NewInstr);
8473 assert(NewDstRC);
8474 NewDstReg = MRI.createVirtualRegister(NewDstRC);
8475 MRI.replaceRegWith(DstReg, NewDstReg);
8476 }
8477 fixImplicitOperands(*NewInstr);
8478
8479 legalizeOperandsVALUt16(*NewInstr, MRI);
8480
8481 // Legalize the operands
8482 legalizeOperands(*NewInstr, MDT);
8483 if (NewDstReg)
8484 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
8485}
8486
8487// Add/sub require special handling to deal with carry outs.
8488std::pair<bool, MachineBasicBlock *>
8489SIInstrInfo::moveScalarAddSub(SIInstrWorklist &Worklist, MachineInstr &Inst,
8490 MachineDominatorTree *MDT) const {
8491 if (ST.hasAddNoCarryInsts()) {
8492 // Assume there is no user of scc since we don't select this in that case.
8493 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
8494 // is used.
8495
8496 MachineBasicBlock &MBB = *Inst.getParent();
8497 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8498
8499 Register OldDstReg = Inst.getOperand(0).getReg();
8500 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8501
8502 unsigned Opc = Inst.getOpcode();
8503 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
8504
8505 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
8506 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
8507
8508 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
8509 Inst.removeOperand(3);
8510
8511 Inst.setDesc(get(NewOpc));
8512 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit
8513 Inst.addImplicitDefUseOperands(*MBB.getParent());
8514 MRI.replaceRegWith(OldDstReg, ResultReg);
8515 MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT);
8516
8517 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8518 return std::pair(true, NewBB);
8519 }
8520
8521 return std::pair(false, nullptr);
8522}
8523
8524void SIInstrInfo::lowerSelect(SIInstrWorklist &Worklist, MachineInstr &Inst,
8525 MachineDominatorTree *MDT) const {
8526
8527 MachineBasicBlock &MBB = *Inst.getParent();
8528 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8529 MachineBasicBlock::iterator MII = Inst;
8530 const DebugLoc &DL = Inst.getDebugLoc();
8531
8532 MachineOperand &Dest = Inst.getOperand(0);
8533 MachineOperand &Src0 = Inst.getOperand(1);
8534 MachineOperand &Src1 = Inst.getOperand(2);
8535 MachineOperand &Cond = Inst.getOperand(3);
8536
8537 Register CondReg = Cond.getReg();
8538 bool IsSCC = (CondReg == AMDGPU::SCC);
8539
8540 // If this is a trivial select where the condition is effectively not SCC
8541 // (CondReg is a source of copy to SCC), then the select is semantically
8542 // equivalent to copying CondReg. Hence, there is no need to create
8543 // V_CNDMASK, we can just use that and bail out.
8544 if (!IsSCC && Src0.isImm() && (Src0.getImm() == -1) && Src1.isImm() &&
8545 (Src1.getImm() == 0)) {
8546 MRI.replaceRegWith(Dest.getReg(), CondReg);
8547 return;
8548 }
8549
8550 Register NewCondReg = CondReg;
8551 if (IsSCC) {
8552 const TargetRegisterClass *TC = RI.getWaveMaskRegClass();
8553 NewCondReg = MRI.createVirtualRegister(TC);
8554
8555 // Now look for the closest SCC def if it is a copy
8556 // replacing the CondReg with the COPY source register
8557 bool CopyFound = false;
8558 for (MachineInstr &CandI :
8560 Inst.getParent()->rend())) {
8561 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) !=
8562 -1) {
8563 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
8564 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), NewCondReg)
8565 .addReg(CandI.getOperand(1).getReg());
8566 CopyFound = true;
8567 }
8568 break;
8569 }
8570 }
8571 if (!CopyFound) {
8572 // SCC def is not a copy
8573 // Insert a trivial select instead of creating a copy, because a copy from
8574 // SCC would semantically mean just copying a single bit, but we may need
8575 // the result to be a vector condition mask that needs preserving.
8576 unsigned Opcode =
8577 ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
8578 auto NewSelect =
8579 BuildMI(MBB, MII, DL, get(Opcode), NewCondReg).addImm(-1).addImm(0);
8580 NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
8581 }
8582 }
8583
8584 Register NewDestReg = MRI.createVirtualRegister(
8585 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest.getReg())));
8586 MachineInstr *NewInst;
8587 if (Inst.getOpcode() == AMDGPU::S_CSELECT_B32) {
8588 NewInst = BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), NewDestReg)
8589 .addImm(0)
8590 .add(Src1) // False
8591 .addImm(0)
8592 .add(Src0) // True
8593 .addReg(NewCondReg);
8594 } else {
8595 NewInst =
8596 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B64_PSEUDO), NewDestReg)
8597 .add(Src1) // False
8598 .add(Src0) // True
8599 .addReg(NewCondReg);
8600 }
8601 MRI.replaceRegWith(Dest.getReg(), NewDestReg);
8602 legalizeOperands(*NewInst, MDT);
8603 addUsersToMoveToVALUWorklist(NewDestReg, MRI, Worklist);
8604}
8605
8606void SIInstrInfo::lowerScalarAbs(SIInstrWorklist &Worklist,
8607 MachineInstr &Inst) const {
8608 MachineBasicBlock &MBB = *Inst.getParent();
8609 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8610 MachineBasicBlock::iterator MII = Inst;
8611 const DebugLoc &DL = Inst.getDebugLoc();
8612
8613 MachineOperand &Dest = Inst.getOperand(0);
8614 MachineOperand &Src = Inst.getOperand(1);
8615 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8616 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
8617
8618 unsigned SubOp = ST.hasAddNoCarryInsts() ? AMDGPU::V_SUB_U32_e32
8619 : AMDGPU::V_SUB_CO_U32_e32;
8620
8621 BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
8622 .addImm(0)
8623 .addReg(Src.getReg());
8624
8625 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
8626 .addReg(Src.getReg())
8627 .addReg(TmpReg);
8628
8629 MRI.replaceRegWith(Dest.getReg(), ResultReg);
8630 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
8631}
8632
// Lower a scalar absolute-difference instruction to VALU: computes
// D = Src1 - Src2, then max(D, 0 - D), i.e. |Src1 - Src2|.
void SIInstrInfo::lowerScalarAbsDiff(SIInstrWorklist &Worklist,
                                     MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src1 = Inst.getOperand(1);
  MachineOperand &Src2 = Inst.getOperand(2);
  Register SubResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  // Use the carry-less subtract when the target provides one.
  unsigned SubOp = ST.hasAddNoCarryInsts() ? AMDGPU::V_SUB_U32_e32
                                           : AMDGPU::V_SUB_CO_U32_e32;

  // SubResult = Src1 - Src2
  BuildMI(MBB, MII, DL, get(SubOp), SubResultReg)
      .addReg(Src1.getReg())
      .addReg(Src2.getReg());

  // Tmp = 0 - SubResult (the negated difference)
  BuildMI(MBB, MII, DL, get(SubOp), TmpReg).addImm(0).addReg(SubResultReg);

  // Result = max(SubResult, -SubResult) == |Src1 - Src2|
  BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
      .addReg(SubResultReg)
      .addReg(TmpReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}
8663
// Lower a scalar XNOR. On subtargets with DL instructions, emit V_XNOR
// directly; otherwise rewrite as NOT + XOR on the scalar unit and let the
// worklist lower those if needed.
void SIInstrInfo::lowerScalarXnor(SIInstrWorklist &Worklist,
                                  MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  if (ST.hasDLInsts()) {
    // Direct VALU xnor is available: force both sources into VGPRs and emit.
    Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
    legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
        .add(Src0)
        .add(Src1);

    MRI.replaceRegWith(Dest.getReg(), NewDest);
    addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
  } else {
    // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
    // invert either source and then perform the XOR. If either source is a
    // scalar register, then we can leave the inversion on the scalar unit to
    // achieve a better distribution of scalar and vector instructions.
    bool Src0IsSGPR = Src0.isReg() &&
                      RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
    bool Src1IsSGPR = Src1.isReg() &&
                      RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
    MachineInstr *Xor;
    Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    // Build a pair of scalar instructions and add them to the work list.
    // The next iteration over the work list will lower these to the vector
    // unit as necessary.
    if (Src0IsSGPR) {
      // (!Src0) ^ Src1
      BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0);
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
      .addReg(Temp)
      .add(Src1);
    } else if (Src1IsSGPR) {
      // Src0 ^ (!Src1)
      BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1);
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
      .add(Src0)
      .addReg(Temp);
    } else {
      // Neither source is scalar: !(Src0 ^ Src1). The trailing NOT must also
      // be queued for VALU lowering.
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp)
      .add(Src0)
      .add(Src1);
      MachineInstr *Not =
          BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp);
      Worklist.insert(Not);
    }

    MRI.replaceRegWith(Dest.getReg(), NewDest);

    Worklist.insert(Xor);

    addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
  }
}
8728
8729void SIInstrInfo::splitScalarNotBinop(SIInstrWorklist &Worklist,
8730 MachineInstr &Inst,
8731 unsigned Opcode) const {
8732 MachineBasicBlock &MBB = *Inst.getParent();
8733 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
8734 MachineBasicBlock::iterator MII = Inst;
8735 const DebugLoc &DL = Inst.getDebugLoc();
8736
8737 MachineOperand &Dest = Inst.getOperand(0);
8738 MachineOperand &Src0 = Inst.getOperand(1);
8739 MachineOperand &Src1 = Inst.getOperand(2);
8740
8741 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8742 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
8743
8744 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm)
8745 .add(Src0)
8746 .add(Src1);
8747
8748 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest)
8749 .addReg(Interm);
8750
8751 Worklist.insert(&Op);
8752 Worklist.insert(&Not);
8753
8754 MRI.replaceRegWith(Dest.getReg(), NewDest);
8755 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
8756}
8757
// Expand a scalar binary op whose second operand is negated ("N2"):
// Dest = Src0 <Opcode> (~Src1). Emits S_NOT_B32 on Src1 followed by Opcode,
// and queues both for VALU lowering.
void SIInstrInfo::splitScalarBinOpN2(SIInstrWorklist &Worklist,
                                     MachineInstr &Inst,
                                     unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  // Interm = ~Src1
  MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm)
                          .add(Src1);

  // NewDest = Src0 <Opcode> Interm
  MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest)
                         .add(Src0)
                         .addReg(Interm);

  Worklist.insert(&Not);
  Worklist.insert(&Op);

  MRI.replaceRegWith(Dest.getReg(), NewDest);
  addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
}
8786
// Split a 64-bit scalar unary op into two 32-bit VALU ops applied to the
// sub0/sub1 halves, then recombine with REG_SEQUENCE. If Swap is set, the
// result halves are exchanged before recombining.
void SIInstrInfo::splitScalar64BitUnaryOp(SIInstrWorklist &Worklist,
                                          MachineInstr &Inst, unsigned Opcode,
                                          bool Swap) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  // An immediate source has no register class; fall back to SGPR_32.
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC =
      RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0);

  // Apply the op to the low half.
  Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);

  // Apply the op to the high half.
  Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);

  if (Swap)
    std::swap(DestSub0, DestSub1);

  // Stitch the two halves back into a 64-bit result.
  Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  // We don't need to legalizeOperands here because for a single operand, src0
  // will support any kind of input.

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
8845
// There is not a vector equivalent of s_mul_u64. For this reason, we need to
// split the s_mul_u64 in 32-bit vector multiplications.
void SIInstrInfo::splitScalarSMulU64(SIInstrWorklist &Worklist,
                                     MachineInstr &Inst,
                                     MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
  Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();
  MachineBasicBlock::iterator MII = Inst;

  // The extracted halves must live in VGPRs; promote SGPR subreg classes.
  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
  const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
  if (RI.isSGPRClass(Src0SubRC))
    Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
  const TargetRegisterClass *Src1SubRC =
      RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
  if (RI.isSGPRClass(Src1SubRC))
    Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);

  // First, we extract the low 32-bit and high 32-bit values from each of the
  // operands.
  MachineOperand Op0L =
      buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
  MachineOperand Op1L =
      buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
  MachineOperand Op0H =
      buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
  MachineOperand Op1H =
      buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC);

  // The multiplication is done as follows:
  //
  //                            Op1H  Op1L
  //                          * Op0H  Op0L
  //                       --------------------
  //                       Op1H*Op0L  Op1L*Op0L
  //          + Op1H*Op0H  Op1L*Op0H
  // -----------------------------------------
  // (Op1H*Op0L + Op1L*Op0H + carry)  Op1L*Op0L
  //
  //  We drop Op1H*Op0H because the result of the multiplication is a 64-bit
  //  value and that would overflow.
  //  The low 32-bit value is Op1L*Op0L.
  //  The high 32-bit value is Op1H*Op0L + Op1L*Op0H + carry (from Op1L*Op0L).

  Register Op1L_Op0H_Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *Op1L_Op0H =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), Op1L_Op0H_Reg)
          .add(Op1L)
          .add(Op0H);

  Register Op1H_Op0L_Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *Op1H_Op0L =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), Op1H_Op0L_Reg)
          .add(Op1H)
          .add(Op0L);

  // High half of Op1L*Op0L supplies the carry into the high 32 bits.
  Register CarryReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *Carry =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_HI_U32_e64), CarryReg)
          .add(Op1L)
          .add(Op0L);

  MachineInstr *LoHalf =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), DestSub0)
          .add(Op1L)
          .add(Op0L);

  Register AddReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *Add = BuildMI(MBB, MII, DL, get(AMDGPU::V_ADD_U32_e32), AddReg)
                          .addReg(Op1L_Op0H_Reg)
                          .addReg(Op1H_Op0L_Reg);

  MachineInstr *HiHalf =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_ADD_U32_e32), DestSub1)
          .addReg(AddReg)
          .addReg(CarryReg);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(*Op1L_Op0H, MDT);
  legalizeOperands(*Op1H_Op0L, MDT);
  legalizeOperands(*Carry, MDT);
  legalizeOperands(*LoHalf, MDT);
  legalizeOperands(*Add, MDT);
  legalizeOperands(*HiHalf, MDT);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
8954
// Lower S_MUL_U64_U32_PSEUDO/S_MUL_I64_I32_PSEUDO in two 32-bit vector
// multiplications. Only the low halves of the operands are needed: the low
// result is mul_lo(Op1L, Op0L) and the high result is mul_hi (unsigned or
// signed depending on the pseudo).
void SIInstrInfo::splitScalarSMulPseudo(SIInstrWorklist &Worklist,
                                        MachineInstr &Inst,
                                        MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
  Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();
  MachineBasicBlock::iterator MII = Inst;

  // The extracted halves must live in VGPRs; promote SGPR subreg classes.
  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
  const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
  if (RI.isSGPRClass(Src0SubRC))
    Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
  const TargetRegisterClass *Src1SubRC =
      RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
  if (RI.isSGPRClass(Src1SubRC))
    Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);

  // First, we extract the low 32-bit and high 32-bit values from each of the
  // operands.
  MachineOperand Op0L =
      buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
  MachineOperand Op1L =
      buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);

  // Pick signed vs unsigned high multiply based on the pseudo being lowered.
  unsigned Opc = Inst.getOpcode();
  unsigned NewOpc = Opc == AMDGPU::S_MUL_U64_U32_PSEUDO
                        ? AMDGPU::V_MUL_HI_U32_e64
                        : AMDGPU::V_MUL_HI_I32_e64;
  MachineInstr *HiHalf =
      BuildMI(MBB, MII, DL, get(NewOpc), DestSub1).add(Op1L).add(Op0L);

  MachineInstr *LoHalf =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), DestSub0)
          .add(Op1L)
          .add(Op0L);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(*HiHalf, MDT);
  legalizeOperands(*LoHalf, MDT);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
9019
// Split a 64-bit scalar binary op into two 32-bit VALU ops applied to the
// sub0/sub1 halves of both operands, then recombine with REG_SEQUENCE.
void SIInstrInfo::splitScalar64BitBinaryOp(SIInstrWorklist &Worklist,
                                           MachineInstr &Inst, unsigned Opcode,
                                           MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  // Immediate sources have no register class; fall back to SGPR_32.
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC =
      RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC =
      RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);

  // Extract sub0/sub1 of each operand (or split an immediate).
  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);
  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC =
      RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0);

  // Low half of the result.
  Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
                              .add(SrcReg0Sub0)
                              .add(SrcReg1Sub0);

  // High half of the result.
  Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
                              .add(SrcReg0Sub1)
                              .add(SrcReg1Sub1);

  // Stitch the halves back together.
  Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
9086
9087void SIInstrInfo::splitScalar64BitXnor(SIInstrWorklist &Worklist,
9088 MachineInstr &Inst,
9089 MachineDominatorTree *MDT) const {
9090 MachineBasicBlock &MBB = *Inst.getParent();
9091 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
9092
9093 MachineOperand &Dest = Inst.getOperand(0);
9094 MachineOperand &Src0 = Inst.getOperand(1);
9095 MachineOperand &Src1 = Inst.getOperand(2);
9096 const DebugLoc &DL = Inst.getDebugLoc();
9097
9098 MachineBasicBlock::iterator MII = Inst;
9099
9100 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
9101
9102 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
9103
9104 MachineOperand* Op0;
9105 MachineOperand* Op1;
9106
9107 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
9108 Op0 = &Src0;
9109 Op1 = &Src1;
9110 } else {
9111 Op0 = &Src1;
9112 Op1 = &Src0;
9113 }
9114
9115 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
9116 .add(*Op0);
9117
9118 Register NewDest = MRI.createVirtualRegister(DestRC);
9119
9120 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
9121 .addReg(Interm)
9122 .add(*Op1);
9123
9124 MRI.replaceRegWith(Dest.getReg(), NewDest);
9125
9126 Worklist.insert(&Xor);
9127}
9128
// Split a 64-bit scalar popcount into two chained V_BCNT_U32_B32 ops:
// the second bcnt accumulates onto the count of the first half.
void SIInstrInfo::splitScalar64BitBCNT(SIInstrWorklist &Worklist,
                                       MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src = Inst.getOperand(1);

  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
  // An immediate source has no register class; fall back to SGPR_32.
  const TargetRegisterClass *SrcRC = Src.isReg() ?
    MRI.getRegClass(Src.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC =
      RI.getSubRegisterClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  // Mid = bcnt(lo) + 0
  BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);

  // Result = bcnt(hi) + Mid
  BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  // We don't need to legalize operands here. src0 for either instruction can be
  // an SGPR, and the second input is unused or determined here.
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}
9166
// Lower a 64-bit S_BFE_I64 sign-extend-in-reg (offset 0, width <= 32) to
// VALU: V_BFE_I32 on the low half plus an arithmetic shift to produce the
// sign-extended high half.
void SIInstrInfo::splitScalar64BitBFE(SIInstrWorklist &Worklist,
                                      MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  uint32_t Imm = Inst.getOperand(2).getImm();
  uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
  uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].

  (void) Offset;

  // Only sext_inreg cases handled.
  assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
         Offset == 0 && "Not implemented");

  if (BitWidth < 32) {
    Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

    // Low half: sign-extend the field from the source's low 32 bits.
    BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32_e64), MidRegLo)
        .addReg(Inst.getOperand(1).getReg(), {}, AMDGPU::sub0)
        .addImm(0)
        .addImm(BitWidth);

    // High half: replicate the sign bit of the low half.
    BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
      .addImm(31)
      .addReg(MidRegLo);

    BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
      .addReg(MidRegLo)
      .addImm(AMDGPU::sub0)
      .addReg(MidRegHi)
      .addImm(AMDGPU::sub1);

    MRI.replaceRegWith(Dest.getReg(), ResultReg);
    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
    return;
  }

  // BitWidth == 32: the low half is unchanged; only the high half needs the
  // sign of the low word.
  MachineOperand &Src = Inst.getOperand(1);
  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
    .addImm(31)
    .addReg(Src.getReg(), {}, AMDGPU::sub0);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
    .addReg(Src.getReg(), {}, AMDGPU::sub0)
    .addImm(AMDGPU::sub0)
    .addReg(TmpReg)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}
9227
// Split a 64-bit scalar count-leading/trailing-zeros into two 32-bit VALU
// counts combined with a saturating add and an unsigned min.
void SIInstrInfo::splitScalar64BitCountOp(SIInstrWorklist &Worklist,
                                          MachineInstr &Inst, unsigned Opcode,
                                          MachineDominatorTree *MDT) const {
  //  (S_FLBIT_I32_B64 hi:lo) ->
  // -> (umin (V_FFBH_U32_e32 hi), (uaddsat (V_FFBH_U32_e32 lo), 32))
  //  (S_FF1_I32_B64 hi:lo) ->
  // ->(umin (uaddsat (V_FFBL_B32_e32 hi), 32) (V_FFBL_B32_e32 lo))

  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src = Inst.getOperand(1);

  const MCInstrDesc &InstDesc = get(Opcode);

  // ctlz counts from the high half; cttz from the low half. This selects
  // which count gets the +32 saturating bias below.
  bool IsCtlz = Opcode == AMDGPU::V_FFBH_U32_e32;
  unsigned OpcodeAdd = ST.hasAddNoCarryInsts() ? AMDGPU::V_ADD_U32_e64
                                               : AMDGPU::V_ADD_CO_U32_e32;

  const TargetRegisterClass *SrcRC =
      Src.isReg() ? MRI.getRegClass(Src.getReg()) : &AMDGPU::SGPR_32RegClass;
  const TargetRegisterClass *SrcSubRC =
      RI.getSubRegisterClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 =
      buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 =
      buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, AMDGPU::sub1, SrcSubRC);

  Register MidReg1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register MidReg2 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register MidReg3 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register MidReg4 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  // Per-half counts.
  BuildMI(MBB, MII, DL, InstDesc, MidReg1).add(SrcRegSub0);

  BuildMI(MBB, MII, DL, InstDesc, MidReg2).add(SrcRegSub1);

  // Saturating +32 on the half whose count must be biased.
  BuildMI(MBB, MII, DL, get(OpcodeAdd), MidReg3)
      .addReg(IsCtlz ? MidReg1 : MidReg2)
      .addImm(32)
      .addImm(1); // enable clamp

  // Final result is the minimum of the two candidate counts.
  BuildMI(MBB, MII, DL, get(AMDGPU::V_MIN_U32_e64), MidReg4)
      .addReg(MidReg3)
      .addReg(IsCtlz ? MidReg2 : MidReg1);

  MRI.replaceRegWith(Dest.getReg(), MidReg4);

  addUsersToMoveToVALUWorklist(MidReg4, MRI, Worklist);
}
9282
// Queue every user of DstReg that cannot accept a vector register operand for
// VALU lowering. Users whose operand class already allows vector registers
// are instead fixed up for true16 operand legality.
void SIInstrInfo::addUsersToMoveToVALUWorklist(
    Register DstReg, MachineRegisterInfo &MRI,
    SIInstrWorklist &Worklist) const {
  // early_inc_range: legalizeOperandsVALUt16 below may modify the use list.
  for (MachineOperand &MO : make_early_inc_range(MRI.use_operands(DstReg))) {
    MachineInstr &UseMI = *MO.getParent();

    unsigned OpNo = 0;

    switch (UseMI.getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::WQM:
    case AMDGPU::SOFT_WQM:
    case AMDGPU::STRICT_WWM:
    case AMDGPU::STRICT_WQM:
    case AMDGPU::REG_SEQUENCE:
    case AMDGPU::PHI:
    case AMDGPU::INSERT_SUBREG:
      // For these, operand 0's class stands in for the whole instruction.
      break;
    default:
      OpNo = MO.getOperandNo();
      break;
    }

    const TargetRegisterClass *OpRC = getOpRegClass(UseMI, OpNo);
    MRI.constrainRegClass(DstReg, OpRC);

    if (!RI.hasVectorRegisters(OpRC))
      Worklist.insert(&UseMI);
    else
      // Legalization could change user list.
      legalizeOperandsVALUt16(UseMI, OpNo, MRI);
  }
}
9316
// Lower a scalar S_PACK_* (pack two 16-bit halves into a 32-bit result) into
// VALU instructions, then replace every use of the original destination with
// the new VGPR result and queue those users for further VALU legalization.
// NOTE(review): the extraction dropped a signature line here (presumably the
// MachineRegisterInfo &MRI parameter, which the body uses) — confirm upstream.
9317void SIInstrInfo::movePackToVALU(SIInstrWorklist &Worklist,
9319 MachineInstr &Inst) const {
9320 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9321 MachineBasicBlock *MBB = Inst.getParent();
9322 MachineOperand &Src0 = Inst.getOperand(1);
9323 MachineOperand &Src1 = Inst.getOperand(2);
9324 const DebugLoc &DL = Inst.getDebugLoc();
9325
 // With real true16 instructions, build the packed value directly as a
 // REG_SEQUENCE of two 16-bit halves instead of shift/mask arithmetic.
9326 if (ST.useRealTrue16Insts()) {
9327 Register SrcReg0, SrcReg1;
 // Materialize non-VGPR sources (immediates, SGPRs) into VGPRs first.
9328 if (!Src0.isReg() || !RI.isVGPR(MRI, Src0.getReg())) {
9329 SrcReg0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9330 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), SrcReg0).add(Src0);
9331 } else {
9332 SrcReg0 = Src0.getReg();
9333 }
9334
9335 if (!Src1.isReg() || !RI.isVGPR(MRI, Src1.getReg())) {
9336 SrcReg1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9337 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), SrcReg1).add(Src1);
9338 } else {
9339 SrcReg1 = Src1.getReg();
9340 }
9341
 // If a source can be constrained to VGPR_16 it is referenced whole
 // (NoSubRegister); otherwise its lo16/hi16 subregister is taken below.
9342 bool isSrc0Reg16 = MRI.constrainRegClass(SrcReg0, &AMDGPU::VGPR_16RegClass);
9343 bool isSrc1Reg16 = MRI.constrainRegClass(SrcReg1, &AMDGPU::VGPR_16RegClass);
9344
9345 auto NewMI = BuildMI(*MBB, Inst, DL, get(AMDGPU::REG_SEQUENCE), ResultReg);
 // The L/H letters in S_PACK_<s0><s1> select which half of each source
 // feeds the lo16/hi16 lanes of the result.
9346 switch (Inst.getOpcode()) {
9347 case AMDGPU::S_PACK_LL_B32_B16:
9348 NewMI
9349 .addReg(SrcReg0, {},
9350 isSrc0Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
9351 .addImm(AMDGPU::lo16)
9352 .addReg(SrcReg1, {},
9353 isSrc1Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
9354 .addImm(AMDGPU::hi16);
9355 break;
9356 case AMDGPU::S_PACK_LH_B32_B16:
9357 NewMI
9358 .addReg(SrcReg0, {},
9359 isSrc0Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
9360 .addImm(AMDGPU::lo16)
9361 .addReg(SrcReg1, {}, AMDGPU::hi16)
9362 .addImm(AMDGPU::hi16);
9363 break;
9364 case AMDGPU::S_PACK_HL_B32_B16:
9365 NewMI.addReg(SrcReg0, {}, AMDGPU::hi16)
9366 .addImm(AMDGPU::lo16)
9367 .addReg(SrcReg1, {},
9368 isSrc1Reg16 ? AMDGPU::NoSubRegister : AMDGPU::lo16)
9369 .addImm(AMDGPU::hi16);
9370 break;
9371 case AMDGPU::S_PACK_HH_B32_B16:
9372 NewMI.addReg(SrcReg0, {}, AMDGPU::hi16)
9373 .addImm(AMDGPU::lo16)
9374 .addReg(SrcReg1, {}, AMDGPU::hi16)
9375 .addImm(AMDGPU::hi16);
9376 break;
9377 default:
9378 llvm_unreachable("unhandled s_pack_* instruction");
9379 }
9380
9381 MachineOperand &Dest = Inst.getOperand(0);
9382 MRI.replaceRegWith(Dest.getReg(), ResultReg);
9383 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
9384 return;
9385 }
9386
 // Fallback path: emulate the pack with 32-bit mask/shift/merge VALU ops.
9387 switch (Inst.getOpcode()) {
9388 case AMDGPU::S_PACK_LL_B32_B16: {
9389 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9390 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9391
9392 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are
9393 // 0.
9394 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
9395 .addImm(0xffff);
9396
 // TmpReg = src0 & 0xffff; Result = (src1 << 16) | TmpReg.
9397 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg)
9398 .addReg(ImmReg, RegState::Kill)
9399 .add(Src0);
9400
9401 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg)
9402 .add(Src1)
9403 .addImm(16)
9404 .addReg(TmpReg, RegState::Kill);
9405 break;
9406 }
9407 case AMDGPU::S_PACK_LH_B32_B16: {
 // Bitfield insert: low half from src0, high half from src1.
9408 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9409 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
9410 .addImm(0xffff);
9411 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32_e64), ResultReg)
9412 .addReg(ImmReg, RegState::Kill)
9413 .add(Src0)
9414 .add(Src1);
9415 break;
9416 }
9417 case AMDGPU::S_PACK_HL_B32_B16: {
 // TmpReg = src0 >> 16; Result = (src1 << 16) | TmpReg.
9418 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9419 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
9420 .addImm(16)
9421 .add(Src0);
9422 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg)
9423 .add(Src1)
9424 .addImm(16)
9425 .addReg(TmpReg, RegState::Kill);
9426 break;
9427 }
9428 case AMDGPU::S_PACK_HH_B32_B16: {
 // Result = (src1 & 0xffff0000) | (src0 >> 16).
9429 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9430 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
9431 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
9432 .addImm(16)
9433 .add(Src0);
9434 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
9435 .addImm(0xffff0000);
9436 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32_e64), ResultReg)
9437 .add(Src1)
9438 .addReg(ImmReg, RegState::Kill)
9439 .addReg(TmpReg, RegState::Kill);
9440 break;
9441 }
9442 default:
9443 llvm_unreachable("unhandled s_pack_* instruction");
9444 }
9445
9446 MachineOperand &Dest = Inst.getOperand(0);
9447 MRI.replaceRegWith(Dest.getReg(), ResultReg);
9448 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
9449}
9450
9451void SIInstrInfo::addSCCDefUsersToVALUWorklist(const MachineOperand &Op,
9452 MachineInstr &SCCDefInst,
9453 SIInstrWorklist &Worklist,
9454 Register NewCond) const {
9455
9456 // Ensure that def inst defines SCC, which is still live.
9457 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
9458 !Op.isDead() && Op.getParent() == &SCCDefInst);
9459 SmallVector<MachineInstr *, 4> CopyToDelete;
9460 // This assumes that all the users of SCC are in the same block
9461 // as the SCC def.
9462 for (MachineInstr &MI : // Skip the def inst itself.
9463 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
9464 SCCDefInst.getParent()->end())) {
9465 // Check if SCC is used first.
9466 int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, &RI, false);
9467 if (SCCIdx != -1) {
9468 if (MI.isCopy()) {
9469 MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
9470 Register DestReg = MI.getOperand(0).getReg();
9471
9472 MRI.replaceRegWith(DestReg, NewCond);
9473 CopyToDelete.push_back(&MI);
9474 } else {
9475
9476 if (NewCond.isValid())
9477 MI.getOperand(SCCIdx).setReg(NewCond);
9478
9479 Worklist.insert(&MI);
9480 }
9481 }
9482 // Exit if we find another SCC def.
9483 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) != -1)
9484 break;
9485 }
9486 for (auto &Copy : CopyToDelete)
9487 Copy->eraseFromParent();
9488}
9489
9490// Instructions that use SCC may be converted to VALU instructions. When that
9491// happens, the SCC register is changed to VCC_LO. The instruction that defines
9492// SCC must be changed to an instruction that defines VCC. This function makes
9493// sure that the instruction that defines SCC is added to the moveToVALU
9494// worklist.
9495void SIInstrInfo::addSCCDefsToVALUWorklist(MachineInstr *SCCUseInst,
9496 SIInstrWorklist &Worklist) const {
9497 // Look for a preceding instruction that either defines VCC or SCC. If VCC
9498 // then there is nothing to do because the defining instruction has been
9499 // converted to a VALU already. If SCC then that instruction needs to be
9500 // converted to a VALU.
9501 for (MachineInstr &MI :
9502 make_range(std::next(MachineBasicBlock::reverse_iterator(SCCUseInst)),
9503 SCCUseInst->getParent()->rend())) {
9504 if (MI.modifiesRegister(AMDGPU::VCC, &RI))
9505 break;
9506 if (MI.definesRegister(AMDGPU::SCC, &RI)) {
9507 Worklist.insert(&MI);
9508 break;
9509 }
9510 }
9511}
9512
9513const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
9514 const MachineInstr &Inst) const {
9515 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
9516
9517 switch (Inst.getOpcode()) {
9518 // For target instructions, getOpRegClass just returns the virtual register
9519 // class associated with the operand, so we need to find an equivalent VGPR
9520 // register class in order to move the instruction to the VALU.
9521 case AMDGPU::COPY:
9522 case AMDGPU::PHI:
9523 case AMDGPU::REG_SEQUENCE:
9524 case AMDGPU::INSERT_SUBREG:
9525 case AMDGPU::WQM:
9526 case AMDGPU::SOFT_WQM:
9527 case AMDGPU::STRICT_WWM:
9528 case AMDGPU::STRICT_WQM: {
9529 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
9530 if (RI.isAGPRClass(SrcRC)) {
9531 if (RI.isAGPRClass(NewDstRC))
9532 return nullptr;
9533
9534 switch (Inst.getOpcode()) {
9535 case AMDGPU::PHI:
9536 case AMDGPU::REG_SEQUENCE:
9537 case AMDGPU::INSERT_SUBREG:
9538 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
9539 break;
9540 default:
9541 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
9542 }
9543
9544 if (!NewDstRC)
9545 return nullptr;
9546 } else {
9547 if (RI.isVGPRClass(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
9548 return nullptr;
9549
9550 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
9551 if (!NewDstRC)
9552 return nullptr;
9553 }
9554
9555 return NewDstRC;
9556 }
9557 default:
9558 return NewDstRC;
9559 }
9560}
9561
9562// Find the one SGPR operand we are allowed to use.
9563Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
9564 int OpIndices[3]) const {
9565 const MCInstrDesc &Desc = MI.getDesc();
9566
9567 // Find the one SGPR operand we are allowed to use.
9568 //
9569 // First we need to consider the instruction's operand requirements before
9570 // legalizing. Some operands are required to be SGPRs, such as implicit uses
9571 // of VCC, but we are still bound by the constant bus requirement to only use
9572 // one.
9573 //
9574 // If the operand's class is an SGPR, we can never move it.
9575
9576 Register SGPRReg = findImplicitSGPRRead(MI);
9577 if (SGPRReg)
9578 return SGPRReg;
9579
9580 Register UsedSGPRs[3] = {Register()};
9581 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
9582
9583 for (unsigned i = 0; i < 3; ++i) {
9584 int Idx = OpIndices[i];
9585 if (Idx == -1)
9586 break;
9587
9588 const MachineOperand &MO = MI.getOperand(Idx);
9589 if (!MO.isReg())
9590 continue;
9591
9592 // Is this operand statically required to be an SGPR based on the operand
9593 // constraints?
9594 const TargetRegisterClass *OpRC =
9595 RI.getRegClass(getOpRegClassID(Desc.operands()[Idx]));
9596 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
9597 if (IsRequiredSGPR)
9598 return MO.getReg();
9599
9600 // If this could be a VGPR or an SGPR, Check the dynamic register class.
9601 Register Reg = MO.getReg();
9602 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
9603 if (RI.isSGPRClass(RegRC))
9604 UsedSGPRs[i] = Reg;
9605 }
9606
9607 // We don't have a required SGPR operand, so we have a bit more freedom in
9608 // selecting operands to move.
9609
9610 // Try to select the most used SGPR. If an SGPR is equal to one of the
9611 // others, we choose that.
9612 //
9613 // e.g.
9614 // V_FMA_F32 v0, s0, s0, s0 -> No moves
9615 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
9616
9617 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
9618 // prefer those.
9619
9620 if (UsedSGPRs[0]) {
9621 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
9622 SGPRReg = UsedSGPRs[0];
9623 }
9624
9625 if (!SGPRReg && UsedSGPRs[1]) {
9626 if (UsedSGPRs[1] == UsedSGPRs[2])
9627 SGPRReg = UsedSGPRs[1];
9628 }
9629
9630 return SGPRReg;
9631}
9632
// Return a pointer to MI's operand with the given symbolic name, or nullptr
// if this opcode has no such operand.
// NOTE(review): the extraction dropped the first signature line (presumably
// MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,) — confirm
// against upstream.
9634 AMDGPU::OpName OperandName) const {
9635 if (OperandName == AMDGPU::OpName::NUM_OPERAND_NAMES)
9636 return nullptr;
9637
 // Table-generated lookup of the operand's index for this opcode.
9638 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
9639 if (Idx == -1)
9640 return nullptr;
9641
9642 return &MI.getOperand(Idx);
9643}
9644
// Build the default buffer-resource (rsrc) descriptor data-format word for
// the current subtarget generation.
// NOTE(review): the extraction dropped the signature line and the two ternary
// arms selecting the GFX11-vs-GFX10 format value — confirm upstream.
9646 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
9647 int64_t Format = ST.getGeneration() >= AMDGPUSubtarget::GFX11
9650 return (Format << 44) |
9651 (1ULL << 56) | // RESOURCE_LEVEL = 1
9652 (3ULL << 60); // OOB_SELECT = 3
9653 }
9654
9655 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
9656 if (ST.isAmdHsaOS()) {
9657 // Set ATC = 1. GFX9 doesn't have this bit.
9658 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
9659 RsrcDataFormat |= (1ULL << 56);
9660
9661 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
9662 // BTW, it disables TC L2 and therefore decreases performance.
9663 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
9664 RsrcDataFormat |= (2ULL << 59);
9665 }
9666
9667 return RsrcDataFormat;
9668}
9669
// Compute words 2-3 of the scratch buffer resource descriptor (size field,
// element size, index stride, and data-format bits).
// NOTE(review): the extraction dropped the function signature and the start
// of the Rsrc23 initializer expression — confirm upstream.
9673 0xffffffff; // Size;
9674
9675 // GFX9 doesn't have ELEMENT_SIZE.
9676 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
9677 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1;
9678 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
9679 }
9680
9681 // IndexStride = 64 / 32.
9682 uint64_t IndexStride = ST.isWave64() ? 3 : 2;
9683 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;
9684
9685 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
9686 // Clear them unless we want a huge stride.
9687 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
9688 ST.getGeneration() <= AMDGPUSubtarget::GFX9)
9689 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
9690
9691 return Rsrc23;
9692}
9693
// Classifies the instruction by opcode; scalar memory reads (SMRD) are the
// low-latency case here. NOTE(review): signature line lost in extraction.
9695 unsigned Opc = MI.getOpcode();
9696
9697 return isSMRD(Opc);
9698}
9699
// A high-latency def is a load through one of the vector-memory paths
// (MUBUF/MTBUF/MIMG/FLAT). NOTE(review): signature line lost in extraction.
9701 return get(Opc).mayLoad() &&
9702 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc));
9703}
9704
// If MI addresses a frame index through its vaddr operand, report that index
// through FrameIndex and return the data register; otherwise return an
// invalid Register. NOTE(review): the first signature line was lost in
// extraction.
9706 int &FrameIndex) const {
9707 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
9708 if (!Addr || !Addr->isFI())
9709 return Register();
9710
 // Stack accesses are expected to carry a private-address memory operand.
9711 assert(!MI.memoperands_empty() &&
9712 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
9713
9714 FrameIndex = Addr->getIndex();
9715 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
9716}
9717
// SGPR-spill flavor of the stack-access query: the addr operand must be a
// frame index; returns the spilled data register. NOTE(review): the first
// signature line was lost in extraction.
9719 int &FrameIndex) const {
9720 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
9721 assert(Addr && Addr->isFI());
9722 FrameIndex = Addr->getIndex();
9723 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
9724}
9725
// TargetInstrInfo hook: if MI loads from a stack slot, return the destination
// register and set FrameIndex; otherwise return an invalid Register.
// NOTE(review): the first signature line was lost in extraction.
9727 int &FrameIndex) const {
9728 if (!MI.mayLoad())
9729 return Register();
9730
 // MUBUF and VGPR-spill loads address the slot through vaddr/vdata.
9731 if (isMUBUF(MI) || isVGPRSpill(MI))
9732 return isStackAccess(MI, FrameIndex);
9733
9734 if (isSGPRSpill(MI))
9735 return isSGPRStackAccess(MI, FrameIndex);
9736
9737 return Register();
9738}
9739
// TargetInstrInfo hook: mirror of isLoadFromStackSlot for stores.
// NOTE(review): the first signature line was lost in extraction.
9741 int &FrameIndex) const {
9742 if (!MI.mayStore())
9743 return Register();
9744
9745 if (isMUBUF(MI) || isVGPRSpill(MI))
9746 return isStackAccess(MI, FrameIndex);
9747
9748 if (isSGPRSpill(MI))
9749 return isSGPRStackAccess(MI, FrameIndex);
9750
9751 return Register();
9752}
9753
// Sum the byte sizes of all instructions inside a bundle.
// NOTE(review): extraction dropped the signature, the iterator
// initialization, and the accumulation statement inside the loop — confirm
// upstream before relying on this text.
9755 unsigned Size = 0;
9757 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
9758 while (++I != E && I->isInsideBundle()) {
9759 assert(!I->isBundle() && "No nested bundle!");
9761 }
9762
9763 return Size;
9764}
9765
// Return the encoded size of MI in bytes, accounting for trailing literal
// constants, NSA words on MIMG, bundles, inline asm, and pseudo-to-MC
// opcode mapping. NOTE(review): extraction dropped the signature, the Desc
// declaration, and the OPERAND_* case labels in the literal-size switch —
// confirm upstream.
9767 unsigned Opc = MI.getOpcode();
9769 unsigned DescSize = Desc.getSize();
9770
9771 // If we have a definitive size, we can use it. Otherwise we need to inspect
9772 // the operands to know the size.
9773 if (isFixedSize(MI)) {
9774 unsigned Size = DescSize;
9775
9776 // If we hit the buggy offset, an extra nop will be inserted in MC so
9777 // estimate the worst case.
9778 if (MI.isBranch() && ST.hasOffset3fBug())
9779 Size += 4;
9780
9781 return Size;
9782 }
9783
9784 // Instructions may have a 32-bit literal encoded after them. Check
9785 // operands that could ever be literals.
9786 if (isVALU(MI) || isSALU(MI)) {
9787 if (isDPP(MI))
9788 return DescSize;
9789 bool HasLiteral = false;
9790 unsigned LiteralSize = 4;
9791 for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
9792 const MachineOperand &Op = MI.getOperand(I);
9793 const MCOperandInfo &OpInfo = Desc.operands()[I];
9794 if (!Op.isReg() && !isInlineConstant(Op, OpInfo)) {
9795 HasLiteral = true;
 // With 64-bit literal support, some operand types widen the
 // trailing literal from 4 to 8 bytes.
9796 if (ST.has64BitLiterals()) {
9797 switch (OpInfo.OperandType) {
9798 default:
9799 break;
9801 if (!AMDGPU::isValid32BitLiteral(Op.getImm(), true))
9802 LiteralSize = 8;
9803 break;
9805 // A 32-bit literal is only valid when the value fits in BOTH signed
9806 // and unsigned 32-bit ranges [0, 2^31-1], matching the MC code
9807 // emitter's getLit64Encoding logic. This is because of the lack of
9808 // abilility to tell signedness of the literal, therefore we need to
9809 // be conservative and assume values outside this range require a
9810 // 64-bit literal encoding (8 bytes).
9811 if (!Op.isImm() || !isInt<32>(Op.getImm()) ||
9812 !isUInt<32>(Op.getImm()))
9813 LiteralSize = 8;
9814 break;
9815 }
9816 }
9817 break;
9818 }
9819 }
9820 return HasLiteral ? DescSize + LiteralSize : DescSize;
9821 }
9822
9823 // Check whether we have extra NSA words.
9824 if (isMIMG(MI)) {
9825 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
9826 if (VAddr0Idx < 0)
9827 return 8;
9828
9829 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
9830 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
9831 }
9832
9833 switch (Opc) {
9834 case TargetOpcode::BUNDLE:
9835 return getInstBundleSize(MI);
9836 case TargetOpcode::INLINEASM:
9837 case TargetOpcode::INLINEASM_BR: {
9838 const MachineFunction *MF = MI.getMF();
9839 const char *AsmStr = MI.getOperand(0).getSymbolName();
9840 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST);
9841 }
9842 default:
9843 if (MI.isMetaInstruction())
9844 return 0;
9845
9846 // If D16 Pseudo inst, get correct MC code size
9847 const auto *D16Info = AMDGPU::getT16D16Helper(Opc);
9848 if (D16Info) {
9849 // Assume d16_lo/hi inst are always in same size
9850 unsigned LoInstOpcode = D16Info->LoOp;
9851 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(LoInstOpcode);
9852 DescSize = Desc.getSize();
9853 }
9854
9855 // If FMA Pseudo inst, get correct MC code size
9856 if (Opc == AMDGPU::V_FMA_MIX_F16_t16 || Opc == AMDGPU::V_FMA_MIX_BF16_t16) {
9857 // All potential lowerings are the same size; arbitrarily pick one.
9858 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(AMDGPU::V_FMA_MIXLO_F16);
9859 DescSize = Desc.getSize();
9860 }
9861
9862 return DescSize;
9863 }
9864}
9865
// Conservatively decide whether a FLAT instruction may touch the flat
// address space: with no memory operands we must assume it can.
// NOTE(review): the signature line was lost in extraction.
9867 if (!isFLAT(MI))
9868 return false;
9869
 // No memory operands means we know nothing about the target address space.
9870 if (MI.memoperands_empty())
9871 return true;
9872
9873 for (const MachineMemOperand *MMO : MI.memoperands()) {
9874 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
9875 return true;
9876 }
9877 return false;
9878}
9879
// Name table for serializing target-index operands in MIR.
// NOTE(review): the signature lines were lost in extraction.
9882 static const std::pair<int, const char *> TargetIndices[] = {
9883 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
9884 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
9885 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
9886 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
9887 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
9888 return ArrayRef(TargetIndices);
9889}
9890
9891/// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The
9892/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
9898
9899/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
9900/// pass.
9906
9907// Called during:
9908// - pre-RA scheduling and post-RA scheduling
// NOTE(review): extraction dropped the signature and the final fall-through
// return (presumably delegating to TargetInstrInfo's default recognizer) —
// confirm upstream.
9911 const ScheduleDAGMI *DAG) const {
9912 // Borrowed from Arm Target
9913 // We would like to restrict this hazard recognizer to only
9914 // post-RA scheduling; we can tell that we're post-RA because we don't
9915 // track VRegLiveness.
9916 if (!DAG->hasVRegLiveness())
9917 return new GCNHazardRecognizer(DAG->MF);
9919}
9920
// Split a target-flags word into (direct flags, bitmask flags) using MO_MASK.
// NOTE(review): the line naming the function and its TF parameter was lost in
// extraction.
9921std::pair<unsigned, unsigned>
9923 return std::pair(TF & MO_MASK, TF & ~MO_MASK);
9924}
9925
// Name table for serializing direct machine-operand target flags in MIR.
// NOTE(review): the signature lines were lost in extraction.
9928 static const std::pair<unsigned, const char *> TargetFlags[] = {
9929 {MO_GOTPCREL, "amdgpu-gotprel"},
9930 {MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo"},
9931 {MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi"},
9932 {MO_GOTPCREL64, "amdgpu-gotprel64"},
9933 {MO_REL32_LO, "amdgpu-rel32-lo"},
9934 {MO_REL32_HI, "amdgpu-rel32-hi"},
9935 {MO_REL64, "amdgpu-rel64"},
9936 {MO_ABS32_LO, "amdgpu-abs32-lo"},
9937 {MO_ABS32_HI, "amdgpu-abs32-hi"},
9938 {MO_ABS64, "amdgpu-abs64"},
9939 };
9940
9941 return ArrayRef(TargetFlags);
9942}
9943
// Name table for serializing AMDGPU-specific MachineMemOperand flags in MIR.
// NOTE(review): the signature lines were lost in extraction.
9946 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
9947 {
9948 {MONoClobber, "amdgpu-noclobber"},
9949 {MOLastUse, "amdgpu-last-use"},
9950 {MOCooperative, "amdgpu-cooperative"},
9951 {MOThreadPrivate, "amdgpu-thread-private"},
9952 };
9953
9954 return ArrayRef(TargetFlags);
9955}
9956
// Choose the copy opcode used when splitting a live range: WWM registers need
// WWM_COPY, everything else a plain COPY. NOTE(review): extraction dropped
// the first signature line and the MFI (SIMachineFunctionInfo) declaration.
9958 const MachineFunction &MF) const {
9960 assert(SrcReg.isVirtual());
9961 if (MFI->checkFlag(SrcReg, AMDGPU::VirtRegFlag::WWM_REG))
9962 return AMDGPU::WWM_COPY;
9963
9964 return AMDGPU::COPY;
9965}
9966
// Decide whether MI is eligible to sit in a basic-block prologue: SGPR/WWM
// spills, live-range-split instructions, and WWM implicit defs qualify.
// NOTE(review): the function's signature line was lost in extraction —
// confirm its exact name (used as canAddToBBProlog below) upstream.
9968 uint32_t Opcode = MI.getOpcode();
9969 // Check if it is SGPR spill or wwm-register spill Opcode.
9970 if (isSGPRSpill(Opcode) || isWWMRegSpillOpcode(Opcode))
9971 return true;
9972
9973 const MachineFunction *MF = MI.getMF();
9974 const MachineRegisterInfo &MRI = MF->getRegInfo();
9976
9977 // See if this is Liverange split instruction inserted for SGPR or
9978 // wwm-register. The implicit def inserted for wwm-registers should also be
9979 // included as they can appear at the bb begin.
9980 bool IsLRSplitInst = MI.getFlag(MachineInstr::LRSplit);
9981 if (!IsLRSplitInst && Opcode != AMDGPU::IMPLICIT_DEF)
9982 return false;
9983
9984 Register Reg = MI.getOperand(0).getReg();
9985 if (RI.isSGPRClass(RI.getRegClassForReg(MRI, Reg)))
9986 return IsLRSplitInst;
9987
9988 return MFI->isWWMReg(Reg);
9989}
9990
// Decide whether MI belongs to the block prologue (instructions inserted
// during RA that must precede the real block body). NOTE(review): the first
// signature line was lost in extraction.
9992 Register Reg) const {
9993 // We need to handle instructions which may be inserted during register
9994 // allocation to handle the prolog. The initial prolog instruction may have
9995 // been separated from the start of the block by spills and copies inserted
9996 // needed by the prolog. However, the insertions for scalar registers can
9997 // always be placed at the BB top as they are independent of the exec mask
9998 // value.
9999 bool IsNullOrVectorRegister = true;
10000 if (Reg) {
10001 const MachineFunction *MF = MI.getMF();
10002 const MachineRegisterInfo &MRI = MF->getRegInfo();
10003 IsNullOrVectorRegister = !RI.isSGPRClass(RI.getRegClassForReg(MRI, Reg));
10004 }
10005
10006 return IsNullOrVectorRegister &&
10007 (canAddToBBProlog(MI) ||
10008 (!MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
10009 MI.modifiesRegister(AMDGPU::EXEC, &RI)));
10010}
10011
// Start building a 32-bit VGPR add with no usable carry-out. On subtargets
// with carry-less adds this is V_ADD_U32; otherwise a V_ADD_CO_U32 whose
// carry def is marked dead (hinted toward VCC). NOTE(review): the first
// signature lines were lost in extraction.
10015 const DebugLoc &DL,
10016 Register DestReg) const {
10017 if (ST.hasAddNoCarryInsts())
10018 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);
10019
10020 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
10021 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
10022 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());
10023
10024 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
10025 .addReg(UnusedCarry, RegState::Define | RegState::Dead);
10026}
10027
// Post-RA variant of getAddNoCarry: a RegScavenger supplies the dead carry
// register when carry-less adds are unavailable. Returns a null builder when
// no carry register can be found. NOTE(review): the first signature lines
// were lost in extraction.
10030 const DebugLoc &DL,
10031 Register DestReg,
10032 RegScavenger &RS) const {
10033 if (ST.hasAddNoCarryInsts())
10034 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);
10035
10036 // If available, prefer to use vcc.
10037 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
10038 ? Register(RI.getVCC())
10039 : RS.scavengeRegisterBackwards(
10040 *RI.getBoolRC(), I, /* RestoreAfter */ false,
10041 0, /* AllowSpill */ false);
10042
10043 // TODO: Users need to deal with this.
10044 if (!UnusedCarry.isValid())
10045 return MachineInstrBuilder();
10046
10047 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
10048 .addReg(UnusedCarry, RegState::Define | RegState::Dead);
10049}
10050
10051bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
10052 switch (Opcode) {
10053 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
10054 case AMDGPU::SI_KILL_I1_TERMINATOR:
10055 return true;
10056 default:
10057 return false;
10058 }
10059}
10060
// Map a SI_KILL_*_PSEUDO opcode to the descriptor of its terminator form.
// NOTE(review): the signature line was lost in extraction.
10062 switch (Opcode) {
10063 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
10064 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
10065 case AMDGPU::SI_KILL_I1_PSEUDO:
10066 return get(AMDGPU::SI_KILL_I1_TERMINATOR);
10067 default:
10068 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
10069 }
10070}
10071
10072bool SIInstrInfo::isLegalMUBUFImmOffset(unsigned Imm) const {
10073 return Imm <= getMaxMUBUFImmOffset(ST);
10074}
10075
// Maximum encodable MUBUF immediate offset: 23 usable bits on GFX12+
// (non-negative part of a signed 24-bit field), 12 bits before that.
// NOTE(review): the signature line was lost in extraction.
10077 // GFX12 field is non-negative 24-bit signed byte offset.
10078 const unsigned OffsetBits =
10079 ST.getGeneration() >= AMDGPUSubtarget::GFX12 ? 23 : 12;
10080 return (1 << OffsetBits) - 1;
10081}
10082
// On wave32 subtargets, rewrite implicit VCC operands to VCC_LO.
// NOTE(review): the signature line was lost in extraction.
10084 if (!ST.isWave32())
10085 return;
10086
10087 if (MI.isInlineAsm())
10088 return;
10089
 // Bail if there are no implicit operands to rewrite.
10090 if (MI.getNumOperands() < MI.getNumExplicitOperands())
10091 return;
10092
10093 for (auto &Op : MI.implicit_operands()) {
10094 if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
10095 Op.setReg(AMDGPU::VCC_LO);
10096 }
10097}
10098
// True if MI is an SMRD whose sbase operand is a 128-bit buffer resource
// (as opposed to a plain 64-bit address). NOTE(review): the signature line
// was lost in extraction.
10100 if (!isSMRD(MI))
10101 return false;
10102
10103 // Check that it is using a buffer resource.
10104 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
10105 if (Idx == -1) // e.g. s_memtime
10106 return false;
10107
10108 const int16_t RCID = getOpRegClassID(MI.getDesc().operands()[Idx]);
10109 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
10110}
10111
10112// Given Imm, split it into the values to put into the SOffset and ImmOffset
10113// fields in an MUBUF instruction. Return false if it is not possible (due to a
10114// hardware bug needing a workaround).
10115//
10116// The required alignment ensures that individual address components remain
10117// aligned if they are aligned to begin with. It also ensures that additional
10118// offsets within the given alignment can be added to the resulting ImmOffset.
// Split Imm into SOffset + ImmOffset for a MUBUF instruction (see the
// comment block above). Returns false when hardware bugs or encoding limits
// make the split impossible. NOTE(review): the first signature line was lost
// in extraction.
10120 uint32_t &ImmOffset, Align Alignment) const {
10121 const uint32_t MaxOffset = SIInstrInfo::getMaxMUBUFImmOffset(ST);
10122 const uint32_t MaxImm = alignDown(MaxOffset, Alignment.value());
10123 uint32_t Overflow = 0;
10124
10125 if (Imm > MaxImm) {
10126 if (Imm <= MaxImm + 64) {
10127 // Use an SOffset inline constant for 4..64
10128 Overflow = Imm - MaxImm;
10129 Imm = MaxImm;
10130 } else {
10131 // Try to keep the same value in SOffset for adjacent loads, so that
10132 // the corresponding register contents can be re-used.
10133 //
10134 // Load values with all low-bits (except for alignment bits) set into
10135 // SOffset, so that a larger range of values can be covered using
10136 // s_movk_i32.
10137 //
10138 // Atomic operations fail to work correctly when individual address
10139 // components are unaligned, even if their sum is aligned.
10140 uint32_t High = (Imm + Alignment.value()) & ~MaxOffset;
10141 uint32_t Low = (Imm + Alignment.value()) & MaxOffset;
10142 Imm = Low;
10143 Overflow = High - Alignment.value();
10144 }
10145 }
10146
10147 if (Overflow > 0) {
10148 // There is a hardware bug in SI and CI which prevents address clamping in
10149 // MUBUF instructions from working correctly with SOffsets. The immediate
10150 // offset is unaffected.
10151 if (ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
10152 return false;
10153
10154 // It is not possible to set immediate in SOffset field on some targets.
10155 if (ST.hasRestrictedSOffset())
10156 return false;
10157 }
10158
10159 ImmOffset = Imm;
10160 SOffset = Overflow;
10161 return true;
10162}
10163
10164// Depending on the used address space and instructions, some immediate offsets
10165// are allowed and some are not.
10166// Pre-GFX12, flat instruction offsets can only be non-negative, global and
10167// scratch instruction offsets can also be negative. On GFX12, offsets can be
10168// negative for all variants.
10169//
10170// There are several bugs related to these offsets:
10171// On gfx10.1, flat instructions that go into the global address space cannot
10172// use an offset.
10173//
10174// For scratch instructions, the address can be either an SGPR or a VGPR.
10175// The following offsets can be used, depending on the architecture (x means
10176// cannot be used):
10177// +----------------------------+------+------+
10178// | Address-Mode | SGPR | VGPR |
10179// +----------------------------+------+------+
10180// | gfx9 | | |
10181// | negative, 4-aligned offset | x | ok |
10182// | negative, unaligned offset | x | ok |
10183// +----------------------------+------+------+
10184// | gfx10 | | |
10185// | negative, 4-aligned offset | ok | ok |
10186// | negative, unaligned offset | ok | x |
10187// +----------------------------+------+------+
10188// | gfx10.3 | | |
10189// | negative, 4-aligned offset | ok | ok |
10190// | negative, unaligned offset | ok | ok |
10191// +----------------------------+------+------+
10192//
10193// This function ignores the addressing mode, so if an offset cannot be used in
10194// one addressing mode, it is considered illegal.
10195bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
10196 uint64_t FlatVariant) const {
10197 // TODO: Should 0 be special cased?
10198 if (!ST.hasFlatInstOffsets())
10199 return false;
10200
10201 if (ST.hasFlatSegmentOffsetBug() && FlatVariant == SIInstrFlags::FLAT &&
10202 (AddrSpace == AMDGPUAS::FLAT_ADDRESS ||
10203 AddrSpace == AMDGPUAS::GLOBAL_ADDRESS))
10204 return false;
10205
10206 if (ST.hasNegativeUnalignedScratchOffsetBug() &&
10207 FlatVariant == SIInstrFlags::FlatScratch && Offset < 0 &&
10208 (Offset % 4) != 0) {
10209 return false;
10210 }
10211
10212 bool AllowNegative = allowNegativeFlatOffset(FlatVariant);
10213 unsigned N = AMDGPU::getNumFlatOffsetBits(ST);
10214 return isIntN(N, Offset) && (AllowNegative || Offset >= 0);
10215}
10216
10217// See comment on SIInstrInfo::isLegalFLATOffset for what is legal and what not.
10218std::pair<int64_t, int64_t>
10219SIInstrInfo::splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace,
10220 uint64_t FlatVariant) const {
10221 int64_t RemainderOffset = COffsetVal;
10222 int64_t ImmField = 0;
10223
10224 bool AllowNegative = allowNegativeFlatOffset(FlatVariant);
10225 const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(ST) - 1;
10226
10227 if (AllowNegative) {
10228 // Use signed division by a power of two to truncate towards 0.
10229 int64_t D = 1LL << NumBits;
10230 RemainderOffset = (COffsetVal / D) * D;
10231 ImmField = COffsetVal - RemainderOffset;
10232
10233 if (ST.hasNegativeUnalignedScratchOffsetBug() &&
10234 FlatVariant == SIInstrFlags::FlatScratch && ImmField < 0 &&
10235 (ImmField % 4) != 0) {
10236 // Make ImmField a multiple of 4
10237 RemainderOffset += ImmField % 4;
10238 ImmField -= ImmField % 4;
10239 }
10240 } else if (COffsetVal >= 0) {
10241 ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits);
10242 RemainderOffset = COffsetVal - ImmField;
10243 }
10244
10245 assert(isLegalFLATOffset(ImmField, AddrSpace, FlatVariant));
10246 assert(RemainderOffset + ImmField == COffsetVal);
10247 return {ImmField, RemainderOffset};
10248}
10249
// Whether the given flat variant may encode a negative offset on this
// subtarget: scratch is excluded on buggy subtargets, and plain FLAT only
// allows it from GFX12 on. NOTE(review): the signature line was lost in
// extraction.
10251 if (ST.hasNegativeScratchOffsetBug() &&
10252 FlatVariant == SIInstrFlags::FlatScratch)
10253 return false;
10254
10255 return FlatVariant != SIInstrFlags::FLAT || AMDGPU::isGFX12Plus(ST);
10256}
10257
// Map the subtarget generation to its SIEncodingFamily value.
// NOTE(review): extraction dropped the case labels (generation enumerators)
// and part of the last ternary — confirm upstream.
10258static unsigned subtargetEncodingFamily(const GCNSubtarget &ST) {
10259 switch (ST.getGeneration()) {
10260 default:
10261 break;
10264 return SIEncodingFamily::SI;
10267 return SIEncodingFamily::VI;
10271 return ST.isGFX1170() ? SIEncodingFamily::GFX1170 : SIEncodingFamily::GFX11;
10273 return ST.hasGFX1250Insts() ? SIEncodingFamily::GFX1250
10277 }
10278 llvm_unreachable("Unknown subtarget generation!");
10279}
10280
10281bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
10282 switch(MCOp) {
10283 // These opcodes use indirect register addressing so
10284 // they need special handling by codegen (currently missing).
10285 // Therefore it is too risky to allow these opcodes
10286 // to be selected by dpp combiner or sdwa peepholer.
10287 case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
10288 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
10289 case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
10290 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
10291 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
10292 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
10293 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
10294 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
10295 return true;
10296 default:
10297 return false;
10298 }
10299}
10300
// Expands to all five encoding variants of an opcode family, for use as
// adjacent case labels.
10301#define GENERATE_RENAMED_GFX9_CASES(OPCODE) \
10302 case OPCODE##_dpp: \
10303 case OPCODE##_e32: \
10304 case OPCODE##_e64: \
10305 case OPCODE##_e64_dpp: \
10306 case OPCODE##_sdwa:
10307
// True for pseudo opcodes whose instructions were renamed on GFX9 and thus
// need the GFX9-specific encoding family (see pseudoToMCOpcode).
10308static bool isRenamedInGFX9(int Opcode) {
10309 switch (Opcode) {
10310 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADDC_U32)
10311 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADD_CO_U32)
10312 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADD_U32)
10313 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBBREV_U32)
10314 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBB_U32)
10315 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBREV_CO_U32)
10316 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBREV_U32)
10317 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUB_CO_U32)
10318 GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUB_U32)
10319 //
10320 case AMDGPU::V_DIV_FIXUP_F16_gfx9_e64:
10321 case AMDGPU::V_DIV_FIXUP_F16_gfx9_fake16_e64:
10322 case AMDGPU::V_FMA_F16_gfx9_e64:
10323 case AMDGPU::V_FMA_F16_gfx9_fake16_e64:
10324 case AMDGPU::V_INTERP_P2_F16:
10325 case AMDGPU::V_MAD_F16_e64:
10326 case AMDGPU::V_MAD_U16_e64:
10327 case AMDGPU::V_MAD_I16_e64:
10328 return true;
10329 default:
10330 return false;
10331 }
10332}
10333
// Map a pseudo opcode to the concrete MC opcode for the current subtarget's
// encoding family, or -1 if the pseudo has no encoding on this subtarget.
// NOTE(review): extraction dropped several lines assigning Gen /
// encoding-family constants and some NMCOp lookups — confirm upstream.
10334int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
10335 assert(Opcode == (int)SIInstrInfo::getNonSoftWaitcntOpcode(Opcode) &&
10336 "SIInsertWaitcnts should have promoted soft waitcnt instructions!");
10337
10338 unsigned Gen = subtargetEncodingFamily(ST);
10339
 // GFX9 renamed several opcodes; they need a dedicated encoding family.
10340 if (ST.getGeneration() == AMDGPUSubtarget::GFX9 && isRenamedInGFX9(Opcode))
10342
10343 // Adjust the encoding family to GFX80 for D16 buffer instructions when the
10344 // subtarget has UnpackedD16VMem feature.
10345 // TODO: remove this when we discard GFX80 encoding.
10346 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
10348
10349 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
10350 switch (ST.getGeneration()) {
10351 default:
10353 break;
10356 break;
10359 break;
10360 }
10361 }
10362
 // MFMA pseudos with early-clobber destinations map to a distinct opcode.
10363 if (isMAI(Opcode)) {
10364 int MFMAOp = AMDGPU::getMFMAEarlyClobberOp(Opcode);
10365 if (MFMAOp != -1)
10366 Opcode = MFMAOp;
10367 }
10368
10369 int32_t MCOp = AMDGPU::getMCOpcode(Opcode, Gen);
10370
10371 if (MCOp == AMDGPU::INSTRUCTION_LIST_END && ST.isGFX1170())
10373
10374 if (MCOp == AMDGPU::INSTRUCTION_LIST_END && ST.hasGFX1250Insts())
10376
10377 // -1 means that Opcode is already a native instruction.
10378 if (MCOp == -1)
10379 return Opcode;
10380
 // Prefer the GFX90A/GFX940 re-encodings when the subtarget supports them.
10381 if (ST.hasGFX90AInsts()) {
10382 uint32_t NMCOp = AMDGPU::INSTRUCTION_LIST_END;
10383 if (ST.hasGFX940Insts())
10385 if (NMCOp == AMDGPU::INSTRUCTION_LIST_END)
10387 if (NMCOp == AMDGPU::INSTRUCTION_LIST_END)
10389 if (NMCOp != AMDGPU::INSTRUCTION_LIST_END)
10390 MCOp = NMCOp;
10391 }
10392
10393 // INSTRUCTION_LIST_END means that Opcode is a pseudo instruction that has no
10394 // encoding in the given subtarget generation.
10395 if (MCOp == AMDGPU::INSTRUCTION_LIST_END)
10396 return -1;
10397
10398 if (isAsmOnlyOpcode(MCOp))
10399 return -1;
10400
10401 return MCOp;
10402}
10403
// Return RegOpnd as a Reg/SubReg pair, or an empty pair if the operand is
// undef.
// NOTE(review): the signature line was lost in extraction -- presumably
// "TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand
// &RegOpnd)"; confirm against upstream.
10404static
10406 assert(RegOpnd.isReg());
10407 return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
10408 getRegSubRegPair(RegOpnd);
10409}
10410
// Find the REG_SEQUENCE input operand that supplies subregister SubReg and
// return it as a Reg/SubReg pair (undef inputs yield an empty pair).
// NOTE(review): the function signature and the fall-through return (original
// line 10419) were lost in extraction -- confirm against upstream.
10413 assert(MI.isRegSequence());
 // Operands come in (reg, subreg-imm) pairs after the def at operand 0.
10414 for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
10415 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
10416 auto &RegOp = MI.getOperand(1 + 2 * I);
10417 return getRegOrUndef(RegOp);
10418 }
10420}
10421
10422// Try to find the definition of reg:subreg in subreg-manipulation pseudos
10423// Following a subreg of reg:subreg isn't supported
// Updates RSR in place to the operand that defines the requested subreg.
// Returns true if RSR was rewritten and the walk can continue.
// NOTE(review): the signature line was lost in extraction.
10426 if (!RSR.SubReg)
10427 return false;
10428 switch (MI.getOpcode()) {
10429 default: break;
10430 case AMDGPU::REG_SEQUENCE:
10431 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
10432 return true;
10433 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg
10434 case AMDGPU::INSERT_SUBREG:
10435 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
10436 // inserted the subreg we're looking for
10437 RSR = getRegOrUndef(MI.getOperand(2));
10438 else { // the subreg in the rest of the reg
10439 auto R1 = getRegOrUndef(MI.getOperand(1));
10440 if (R1.SubReg) // subreg of subreg isn't supported
10441 return false;
10442 RSR.Reg = R1.Reg;
10443 }
10444 return true;
10445 }
10446 return false;
10447}
10448
// Walk the SSA def chain of a Reg/SubReg pair through COPY / V_MOV_B32 and
// subreg-manipulation pseudos, returning the first "real" defining
// instruction, or nullptr if the chain hits a physical register or undef.
// NOTE(review): the first signature line was lost in extraction.
10450 const MachineRegisterInfo &MRI) {
10451 assert(MRI.isSSA());
10452 if (!P.Reg.isVirtual())
10453 return nullptr;
10454
10455 auto RSR = P;
10456 auto *DefInst = MRI.getVRegDef(RSR.Reg);
 // Each iteration either follows one hop (setting DefInst to the next def)
 // or leaves DefInst null to stop at the current instruction.
10457 while (auto *MI = DefInst) {
10458 DefInst = nullptr;
10459 switch (MI->getOpcode()) {
10460 case AMDGPU::COPY:
10461 case AMDGPU::V_MOV_B32_e32: {
10462 auto &Op1 = MI->getOperand(1);
10463 if (Op1.isReg() && Op1.getReg().isVirtual()) {
10464 if (Op1.isUndef())
10465 return nullptr;
10466 RSR = getRegSubRegPair(Op1);
10467 DefInst = MRI.getVRegDef(RSR.Reg);
10468 }
10469 break;
10470 }
10471 default:
10472 if (followSubRegDef(*MI, RSR)) {
10473 if (!RSR.Reg)
10474 return nullptr;
10475 DefInst = MRI.getVRegDef(RSR.Reg);
10476 }
10477 }
10478 if (!DefInst)
10479 return MI;
10480 }
10481 return nullptr;
10482}
10483
// Conservatively determine whether EXEC may be modified between DefMI and a
// specific UseMI in the same block. Returns true (may be modified) if the
// instructions are in different blocks or the scan window is exceeded.
// NOTE(review): the first signature line was lost in extraction.
10485 Register VReg,
10486 const MachineInstr &DefMI,
10487 const MachineInstr &UseMI) {
10488 assert(MRI.isSSA() && "Must be run on SSA");
10489
10490 auto *TRI = MRI.getTargetRegisterInfo();
10491 auto *DefBB = DefMI.getParent();
10492
10493 // Don't bother searching between blocks, although it is possible this block
10494 // doesn't modify exec.
10495 if (UseMI.getParent() != DefBB)
10496 return true;
10497
 // Bounded linear scan to keep compile time predictable.
10498 const int MaxInstScan = 20;
10499 int NumInst = 0;
10500
10501 // Stop scan at the use.
10502 auto E = UseMI.getIterator();
10503 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
10504 if (I->isDebugInstr())
10505 continue;
10506
10507 if (++NumInst > MaxInstScan)
10508 return true;
10509
10510 if (I->modifiesRegister(AMDGPU::EXEC, TRI))
10511 return true;
10512 }
10513
10514 return false;
10515}
10516
// Conservatively determine whether EXEC may be modified between DefMI and any
// use of VReg. All uses must be non-PHI, in DefMI's block, and within a small
// scan window; otherwise returns true (may be modified).
// NOTE(review): the first signature line was lost in extraction.
10518 Register VReg,
10519 const MachineInstr &DefMI) {
10520 assert(MRI.isSSA() && "Must be run on SSA");
10521
10522 auto *TRI = MRI.getTargetRegisterInfo();
10523 auto *DefBB = DefMI.getParent();
10524
 // First pass: count the uses and bail out on anything outside the block.
10525 const int MaxUseScan = 10;
10526 int NumUse = 0;
10527
10528 for (auto &Use : MRI.use_nodbg_operands(VReg)) {
10529 auto &UseInst = *Use.getParent();
10530 // Don't bother searching between blocks, although it is possible this block
10531 // doesn't modify exec.
10532 if (UseInst.getParent() != DefBB || UseInst.isPHI())
10533 return true;
10534
10535 if (++NumUse > MaxUseScan)
10536 return true;
10537 }
10538
10539 if (NumUse == 0)
10540 return false;
10541
 // Second pass: walk forward from DefMI until every use has been seen,
 // failing if EXEC is (possibly) written first.
10542 const int MaxInstScan = 20;
10543 int NumInst = 0;
10544
10545 // Stop scan when we have seen all the uses.
10546 for (auto I = std::next(DefMI.getIterator()); ; ++I) {
 // All uses are in this block, so the walk must terminate before end().
10547 assert(I != DefBB->end());
10548
10549 if (I->isDebugInstr())
10550 continue;
10551
10552 if (++NumInst > MaxInstScan)
10553 return true;
10554
10555 for (const MachineOperand &Op : I->operands()) {
10556 // We don't check reg masks here as they're used only on calls:
10557 // 1. EXEC is only considered const within one BB
10558 // 2. Call should be a terminator instruction if present in a BB
10559
10560 if (!Op.isReg())
10561 continue;
10562
10563 Register Reg = Op.getReg();
10564 if (Op.isUse()) {
10565 if (Reg == VReg && --NumUse == 0)
10566 return false;
10567 } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC))
10568 return true;
10569 }
10570 }
10571}
10572
// Place the PHI destination COPY before the first non-PHI reader of Dst in
// this block (if one exists before LastPHIIt); otherwise defer to the
// default TargetInstrInfo placement.
// NOTE(review): the signature lines were lost in extraction.
10575 const DebugLoc &DL, Register Src, Register Dst) const {
10576 auto Cur = MBB.begin();
10577 if (Cur != MBB.end())
10578 do {
10579 if (!Cur->isPHI() && Cur->readsRegister(Dst, /*TRI=*/nullptr))
10580 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
10581 ++Cur;
10582 } while (Cur != MBB.end() && Cur != LastPHIIt);
10583
10584 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
10585 Dst);
10586}
10587
// Emit the PHI source copy. When the insertion point is an SI_IF/SI_ELSE/
// SI_IF_BREAK that defines Src, place a lane-mask terminator move after it
// (with an implicit EXEC use) instead of a plain COPY.
// NOTE(review): the signature lines were lost in extraction.
10590 const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
10591 if (InsPt != MBB.end() &&
10592 (InsPt->getOpcode() == AMDGPU::SI_IF ||
10593 InsPt->getOpcode() == AMDGPU::SI_ELSE ||
10594 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
10595 InsPt->definesRegister(Src, /*TRI=*/nullptr)) {
 // Insert after the control-flow pseudo that produces the lane mask.
10596 InsPt++;
10597 return BuildMI(MBB, InsPt, DL,
10598 get(AMDGPU::LaneMaskConstants::get(ST).MovTermOpc), Dst)
10599 .addReg(Src, {}, SrcSubReg)
10600 .addReg(AMDGPU::EXEC, RegState::Implicit);
10601 }
10602 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
10603 Dst);
10604}
10605
10606bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }
10607
// Never actually folds a memory operand; instead constrains the register
// class of copies from/to special SGPRs (m0, exec) so the generic folder
// cannot produce an unspillable copy. Always returns nullptr.
// NOTE(review): the leading signature lines were lost in extraction.
10610 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
10611 VirtRegMap *VRM) const {
10612 // This is a bit of a hack (copied from AArch64). Consider this instruction:
10613 //
10614 // %0:sreg_32 = COPY $m0
10615 //
10616 // We explicitly chose SReg_32 for the virtual register so such a copy might
10617 // be eliminated by RegisterCoalescer. However, that may not be possible, and
10618 // %0 may even spill. We can't spill $m0 normally (it would require copying to
10619 // a numbered SGPR anyway), and since it is in the SReg_32 register class,
10620 // TargetInstrInfo::foldMemoryOperand() is going to try.
10621 // A similar issue also exists with spilling and reloading $exec registers.
10622 //
10623 // To prevent that, constrain the %0 register class here.
10624 if (isFullCopyInstr(MI)) {
10625 Register DstReg = MI.getOperand(0).getReg();
10626 Register SrcReg = MI.getOperand(1).getReg();
 // Exactly one side virtual: a copy between a vreg and a physical SGPR.
10627 if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
10628 (DstReg.isVirtual() != SrcReg.isVirtual())) {
10629 MachineRegisterInfo &MRI = MF.getRegInfo();
10630 Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
10631 const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
10632 if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
10633 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
10634 return nullptr;
10635 }
10636 if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
10637 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
10638 return nullptr;
10639 }
10640 }
10641 }
10642
10643 return nullptr;
10644}
10645
// Compute instruction latency from the scheduling model. For a BUNDLE the
// result is the max latency of the bundled instructions plus an issue-count
// adjustment.
// NOTE(review): the first signature line and the declaration of iterator I
// (original line 10650) were lost in extraction.
10647 const MachineInstr &MI,
10648 unsigned *PredCost) const {
10649 if (MI.isBundle()) {
10651 MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
10652 unsigned Lat = 0, Count = 0;
 // Walk the instructions bundled with the BUNDLE header.
10653 for (++I; I != E && I->isBundledWithPred(); ++I) {
10654 ++Count;
10655 Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
10656 }
10657 return Lat + Count - 1;
10658 }
10659
10660 return SchedModel.computeInstrLatency(&MI);
10661}
10662
// Return the operand holding the call target: the src0 named operand when
// present.
// NOTE(review): the second signature line and the fallback path (original
// lines 10664 and 10668) were lost in extraction -- confirm the missing
// fallback against upstream.
10663const MachineOperand &
10665 if (const MachineOperand *CallAddrOp =
10666 getNamedOperand(MI, AMDGPU::OpName::src0))
10667 return *CallAddrOp;
10669}
10670
// Classify the uniformity of a generic (pre-ISel) machine instruction:
// address-space casts from private to flat (with globally addressable
// scratch), flat/private loads, and atomics are treated specially.
// NOTE(review): the signature and several return statements were lost in
// extraction (the bare numeric lines) -- confirm the dropped results against
// upstream before relying on this listing.
10673 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
10674 unsigned Opcode = MI.getOpcode();
10675
 // Decide uniformity of an addrspacecast; the two ternary results after the
 // condition were lost in extraction.
10676 auto HandleAddrSpaceCast = [this, &MRI](const MachineInstr &MI) {
 // For the intrinsic form the source pointer is operand 2, otherwise 1.
10677 Register Dst = MI.getOperand(0).getReg();
10678 Register Src = isa<GIntrinsic>(MI) ? MI.getOperand(2).getReg()
10679 : MI.getOperand(1).getReg();
10680 LLT DstTy = MRI.getType(Dst);
10681 LLT SrcTy = MRI.getType(Src);
10682 unsigned DstAS = DstTy.getAddressSpace();
10683 unsigned SrcAS = SrcTy.getAddressSpace();
10684 return SrcAS == AMDGPUAS::PRIVATE_ADDRESS &&
10685 DstAS == AMDGPUAS::FLAT_ADDRESS &&
10686 ST.hasGloballyAddressableScratch()
10689 };
10690
10691 // If the target supports globally addressable scratch, the mapping from
10692 // scratch memory to the flat aperture changes therefore an address space cast
10693 // is no longer uniform.
10694 if (Opcode == TargetOpcode::G_ADDRSPACE_CAST)
10695 return HandleAddrSpaceCast(MI);
10696
10697 if (auto *GI = dyn_cast<GIntrinsic>(&MI)) {
10698 auto IID = GI->getIntrinsicID();
10703
10704 switch (IID) {
10705 case Intrinsic::amdgcn_addrspacecast_nonnull:
10706 return HandleAddrSpaceCast(MI);
10707 case Intrinsic::amdgcn_if:
10708 case Intrinsic::amdgcn_else:
10709 // FIXME: Uniform if second result
10710 break;
10711 }
10712
10714 }
10715
10716 // Loads from the private and flat address spaces are divergent, because
10717 // threads can execute the load instruction with the same inputs and get
10718 // different results.
10719 //
10720 // All other loads are not divergent, because if threads issue loads with the
10721 // same arguments, they will always get the same result.
10722 if (Opcode == AMDGPU::G_LOAD || Opcode == AMDGPU::G_ZEXTLOAD ||
10723 Opcode == AMDGPU::G_SEXTLOAD) {
10724 if (MI.memoperands_empty())
10725 return InstructionUniformity::NeverUniform; // conservative assumption
10726
10727 if (llvm::any_of(MI.memoperands(), [](const MachineMemOperand *mmo) {
10728 return mmo->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
10729 mmo->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS;
10730 })) {
10731 // At least one MMO in a non-global address space.
10733 }
10735 }
10736
 // Generic atomics are divergent (return statement lost in extraction).
10737 if (SIInstrInfo::isGenericAtomicRMWOpcode(Opcode) ||
10738 Opcode == AMDGPU::G_ATOMIC_CMPXCHG ||
10739 Opcode == AMDGPU::G_ATOMIC_CMPXCHG_WITH_SUCCESS ||
10740 AMDGPU::isGenericAtomic(Opcode)) {
10742 }
10744}
10745
// Lazily construct and cache the AMDGPU-specific MIR formatter.
// NOTE(review): the signature line was lost in extraction.
10747 if (!Formatter)
10748 Formatter = std::make_unique<AMDGPUMIRFormatter>(ST);
10749 return Formatter.get();
10750}
10751
// Classify the uniformity of a MIR instruction: lane reads are always
// uniform, atomics and flat/private loads are divergent, and otherwise the
// decision falls back to register-bank assignment of the operands.
// NOTE(review): the signature and several return statements were lost in
// extraction (the bare numeric lines) -- confirm the dropped results against
// upstream before relying on this listing.
10754
10755 if (isNeverUniform(MI))
10757
 // Lane-read style opcodes produce a scalar (return lost in extraction).
10758 unsigned opcode = MI.getOpcode();
10759 if (opcode == AMDGPU::V_READLANE_B32 ||
10760 opcode == AMDGPU::V_READFIRSTLANE_B32 ||
10761 opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR)
10763
 // Copies from physical registers: uniform iff the source class is SGPR.
10764 if (isCopyInstr(MI)) {
10765 const MachineOperand &srcOp = MI.getOperand(1);
10766 if (srcOp.isReg() && srcOp.getReg().isPhysical()) {
10767 const TargetRegisterClass *regClass =
10768 RI.getPhysRegBaseClass(srcOp.getReg());
10769 return RI.isSGPRClass(regClass) ? InstructionUniformity::AlwaysUniform
10771 }
10773 }
10774
10775 // GMIR handling
10776 if (MI.isPreISelOpcode())
10778
10779 // Atomics are divergent because they are executed sequentially: when an
10780 // atomic operation refers to the same address in each thread, then each
10781 // thread after the first sees the value written by the previous thread as
10782 // original value.
10783
10784 if (isAtomic(MI))
10786
10787 // Loads from the private and flat address spaces are divergent, because
10788 // threads can execute the load instruction with the same inputs and get
10789 // different results.
10790 if (isFLAT(MI) && MI.mayLoad()) {
10791 if (MI.memoperands_empty())
10792 return InstructionUniformity::NeverUniform; // conservative assumption
10793
10794 if (llvm::any_of(MI.memoperands(), [](const MachineMemOperand *mmo) {
10795 return mmo->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
10796 mmo->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS;
10797 })) {
10798 // At least one MMO in a non-global address space.
10800 }
10801
10803 }
10804
10805 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
10806 const AMDGPURegisterBankInfo *RBI = ST.getRegBankInfo();
10807
10808 // FIXME: It's conceptually broken to report this for an instruction, and not
10809 // a specific def operand. For inline asm in particular, there could be mixed
10810 // uniform and divergent results.
10811 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
10812 const MachineOperand &SrcOp = MI.getOperand(I);
10813 if (!SrcOp.isReg())
10814 continue;
10815
10816 Register Reg = SrcOp.getReg();
10817 if (!Reg || !SrcOp.readsReg())
10818 continue;
10819
10820 // If RegBank is null, this is unassigned or an unallocatable special
10821 // register, which are all scalars.
10822 const RegisterBank *RegBank = RBI->getRegBank(Reg, MRI, RI);
10823 if (RegBank && RegBank->getID() != AMDGPU::SGPRRegBankID)
10825 }
10826
10827 // TODO: Uniformity check conditions above can be rearranged for more
10828 // readability
10829
10830 // TODO: amdgcn.{ballot, [if]cmp} should be AlwaysUniform, but they are
10831 // currently turned into no-op COPYs by SelectionDAG ISel and are
10832 // therefore no longer recognizable.
10833
10835}
10836
// Map the function's calling convention to the hardware shader-type value
// used by ds_ordered_count (0..3); diagnoses unsupported conventions.
// NOTE(review): the signature and the case labels for the 1/2/3 returns were
// lost in extraction -- confirm which conventions map to which value against
// upstream.
10838 switch (MF.getFunction().getCallingConv()) {
10840 return 1;
10842 return 2;
10844 return 3;
 // Unsupported conventions: emit a diagnostic, then fall through to the
 // default "compute" value.
10848 const Function &F = MF.getFunction();
10849 F.getContext().diagnose(DiagnosticInfoUnsupported(
10850 F, "ds_ordered_count unsupported for this calling conv"));
10851 [[fallthrough]];
10852 }
10855 case CallingConv::C:
10856 case CallingConv::Fast:
10857 default:
10858 // Assume other calling conventions are various compute callable functions
10859 return 0;
10860 }
10861}
10862
// Decompose an S_CMP / S_CMPK instruction into its sources and compared
// value for the peephole in optimizeCompareInstr. Returns false for
// unsupported compares or sub-register operands.
// NOTE(review): the first signature line was lost in extraction.
10864 Register &SrcReg2, int64_t &CmpMask,
10865 int64_t &CmpValue) const {
10866 if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg())
10867 return false;
10868
10869 switch (MI.getOpcode()) {
10870 default:
10871 break;
 // Register/register or register/immediate compares.
10872 case AMDGPU::S_CMP_EQ_U32:
10873 case AMDGPU::S_CMP_EQ_I32:
10874 case AMDGPU::S_CMP_LG_U32:
10875 case AMDGPU::S_CMP_LG_I32:
10876 case AMDGPU::S_CMP_LT_U32:
10877 case AMDGPU::S_CMP_LT_I32:
10878 case AMDGPU::S_CMP_GT_U32:
10879 case AMDGPU::S_CMP_GT_I32:
10880 case AMDGPU::S_CMP_LE_U32:
10881 case AMDGPU::S_CMP_LE_I32:
10882 case AMDGPU::S_CMP_GE_U32:
10883 case AMDGPU::S_CMP_GE_I32:
10884 case AMDGPU::S_CMP_EQ_U64:
10885 case AMDGPU::S_CMP_LG_U64:
10886 SrcReg = MI.getOperand(0).getReg();
10887 if (MI.getOperand(1).isReg()) {
10888 if (MI.getOperand(1).getSubReg())
10889 return false;
10890 SrcReg2 = MI.getOperand(1).getReg();
10891 CmpValue = 0;
10892 } else if (MI.getOperand(1).isImm()) {
10893 SrcReg2 = Register();
10894 CmpValue = MI.getOperand(1).getImm();
10895 } else {
10896 return false;
10897 }
10898 CmpMask = ~0;
10899 return true;
 // S_CMPK forms always compare against an inline immediate.
10900 case AMDGPU::S_CMPK_EQ_U32:
10901 case AMDGPU::S_CMPK_EQ_I32:
10902 case AMDGPU::S_CMPK_LG_U32:
10903 case AMDGPU::S_CMPK_LG_I32:
10904 case AMDGPU::S_CMPK_LT_U32:
10905 case AMDGPU::S_CMPK_LT_I32:
10906 case AMDGPU::S_CMPK_GT_U32:
10907 case AMDGPU::S_CMPK_GT_I32:
10908 case AMDGPU::S_CMPK_LE_U32:
10909 case AMDGPU::S_CMPK_LE_I32:
10910 case AMDGPU::S_CMPK_GE_U32:
10911 case AMDGPU::S_CMPK_GE_I32:
10912 SrcReg = MI.getOperand(0).getReg();
10913 SrcReg2 = Register();
10914 CmpValue = MI.getOperand(1).getImm();
10915 CmpMask = ~0;
10916 return true;
10917 }
10918
10919 return false;
10920}
10921
// SCC is dead at the end of MBB if no successor block has it live-in.
// NOTE(review): the signature line was lost in extraction -- presumably
// "static bool isSCCDeadOnExit(MachineBasicBlock *MBB) {".
10923 for (MachineBasicBlock *S : MBB->successors()) {
10924 if (S->isLiveIn(AMDGPU::SCC))
10925 return false;
10926 }
10927 return true;
10928}
10929
10930// Invert all uses of SCC following SCCDef because SCCDef may be deleted and
10931// (incoming SCC) = !(SCC defined by SCCDef).
10932// Return true if all uses can be re-written, false otherwise.
10933bool SIInstrInfo::invertSCCUse(MachineInstr *SCCDef) const {
10934 MachineBasicBlock *MBB = SCCDef->getParent();
10935 SmallVector<MachineInstr *> InvertInstr;
10936 bool SCCIsDead = false;
10937
10938 // Scan instructions for SCC uses that need to be inverted until SCC is dead.
10939 constexpr unsigned ScanLimit = 12;
10940 unsigned Count = 0;
10941 for (MachineInstr &MI :
10942 make_range(std::next(MachineBasicBlock::iterator(SCCDef)), MBB->end())) {
10943 if (++Count > ScanLimit)
10944 return false;
10945 if (MI.readsRegister(AMDGPU::SCC, &RI)) {
10946 if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 ||
10947 MI.getOpcode() == AMDGPU::S_CSELECT_B64 ||
10948 MI.getOpcode() == AMDGPU::S_CBRANCH_SCC0 ||
10949 MI.getOpcode() == AMDGPU::S_CBRANCH_SCC1)
10950 InvertInstr.push_back(&MI);
10951 else
10952 return false;
10953 }
10954 if (MI.definesRegister(AMDGPU::SCC, &RI)) {
10955 SCCIsDead = true;
10956 break;
10957 }
10958 }
10959 if (!SCCIsDead && isSCCDeadOnExit(MBB))
10960 SCCIsDead = true;
10961
10962 // SCC may have more uses. Can't invert all of them.
10963 if (!SCCIsDead)
10964 return false;
10965
10966 // Invert uses
10967 for (MachineInstr *MI : InvertInstr) {
10968 if (MI->getOpcode() == AMDGPU::S_CSELECT_B32 ||
10969 MI->getOpcode() == AMDGPU::S_CSELECT_B64) {
10970 swapOperands(*MI);
10971 } else if (MI->getOpcode() == AMDGPU::S_CBRANCH_SCC0 ||
10972 MI->getOpcode() == AMDGPU::S_CBRANCH_SCC1) {
10973 MI->setDesc(get(MI->getOpcode() == AMDGPU::S_CBRANCH_SCC0
10974 ? AMDGPU::S_CBRANCH_SCC1
10975 : AMDGPU::S_CBRANCH_SCC0));
10976 } else {
10977 llvm_unreachable("SCC used but no inversion handling");
10978 }
10979 }
10980 return true;
10981}
10982
10983// SCC is already valid after SCCValid.
10984// SCCRedefine will redefine SCC to the same value already available after
10985// SCCValid. If there are no intervening SCC conflicts delete SCCRedefine and
10986// update kill/dead flags if necessary.
10987bool SIInstrInfo::optimizeSCC(MachineInstr *SCCValid, MachineInstr *SCCRedefine,
10988 bool NeedInversion) const {
 // Track the last instruction in the window that marks SCC killed, so its
 // kill flag can be cleared once SCC stays live through to SCCRedefine.
10989 MachineInstr *KillsSCC = nullptr;
 // Only handled within a single block.
10990 if (SCCValid->getParent() != SCCRedefine->getParent())
10991 return false;
10992 for (MachineInstr &MI : make_range(std::next(SCCValid->getIterator()),
10993 SCCRedefine->getIterator())) {
 // Any intervening SCC writer invalidates the forwarding.
10994 if (MI.modifiesRegister(AMDGPU::SCC, &RI))
10995 return false;
10996 if (MI.killsRegister(AMDGPU::SCC, &RI))
10997 KillsSCC = &MI;
10998 }
 // If the redefined value is the inverse, rewrite downstream SCC readers.
10999 if (NeedInversion && !invertSCCUse(SCCRedefine))
11000 return false;
 // SCCValid's SCC def is now consumed past SCCRedefine's former position.
11001 if (MachineOperand *SccDef =
11002 SCCValid->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr))
11003 SccDef->setIsDead(false);
11004 if (KillsSCC)
11005 KillsSCC->clearRegisterKills(AMDGPU::SCC, /*TRI=*/nullptr);
11006 SCCRedefine->eraseFromParent();
11007 return true;
11008}
11009
11010static bool foldableSelect(const MachineInstr &Def) {
11011 if (Def.getOpcode() != AMDGPU::S_CSELECT_B32 &&
11012 Def.getOpcode() != AMDGPU::S_CSELECT_B64)
11013 return false;
11014 bool Op1IsNonZeroImm =
11015 Def.getOperand(1).isImm() && Def.getOperand(1).getImm() != 0;
11016 bool Op2IsZeroImm =
11017 Def.getOperand(2).isImm() && Def.getOperand(2).getImm() == 0;
11018 if (!Op1IsNonZeroImm || !Op2IsZeroImm)
11019 return false;
11020 return true;
11021}
11022
11023static bool setsSCCIfResultIsZero(const MachineInstr &Def, bool &NeedInversion,
11024 unsigned &NewDefOpc) {
11025 // S_ADD_U32 X, 1 sets SCC on carryout which can only happen if result==0.
11026 // S_ADD_I32 X, 1 can be converted to S_ADD_U32 X, 1 if SCC is dead.
11027 if (Def.getOpcode() != AMDGPU::S_ADD_I32 &&
11028 Def.getOpcode() != AMDGPU::S_ADD_U32)
11029 return false;
11030 const MachineOperand &AddSrc1 = Def.getOperand(1);
11031 const MachineOperand &AddSrc2 = Def.getOperand(2);
11032 int64_t addend;
11033
11034 if ((!AddSrc1.isImm() || AddSrc1.getImm() != 1) &&
11035 (!AddSrc2.isImm() || AddSrc2.getImm() != 1) &&
11036 (!getFoldableImm(&AddSrc1, addend) || addend != 1) &&
11037 (!getFoldableImm(&AddSrc2, addend) || addend != 1))
11038 return false;
11039
11040 if (Def.getOpcode() == AMDGPU::S_ADD_I32) {
11041 const MachineOperand *SccDef =
11042 Def.findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr);
11043 if (!SccDef->isDead())
11044 return false;
11045 NewDefOpc = AMDGPU::S_ADD_U32;
11046 }
11047 NeedInversion = !NeedInversion;
11048 return true;
11049}
11050
// Peephole for S_CMP*: remove a compare against zero when a preceding scalar
// op already left the desired value in SCC (optimizeCmpSelect), or fold
// "s_and + s_cmp" of a single-bit mask into the AND itself or an S_BITCMP
// (optimizeCmpAnd).
// NOTE(review): the first signature line was lost in extraction.
11052 Register SrcReg2, int64_t CmpMask,
11053 int64_t CmpValue,
11054 const MachineRegisterInfo *MRI) const {
11055 if (!SrcReg || SrcReg.isPhysical())
11056 return false;
11057
 // A register second operand is only handled if it folds to an immediate.
11058 if (SrcReg2 && !getFoldableImm(SrcReg2, *MRI, CmpValue))
11059 return false;
11060
11061 const auto optimizeCmpSelect = [&CmpInstr, SrcReg, CmpValue, MRI,
11062 this](bool NeedInversion) -> bool {
11063 if (CmpValue != 0)
11064 return false;
11065
11066 MachineInstr *Def = MRI->getVRegDef(SrcReg);
11067 if (!Def)
11068 return false;
11069
11070 // For S_OP that set SCC = DST!=0, do the transformation
11071 //
11072 // s_cmp_[lg|eq]_* (S_OP ...), 0 => (S_OP ...)
11073 //
11074 // For (S_OP ...) that set SCC = DST==0, invert NeedInversion and
11075 // do the transformation:
11076 //
11077 // s_cmp_[lg|eq]_* (S_OP ...), 0 => (S_OP ...)
11078 //
11079 // If foldableSelect, s_cmp_lg_* is redundant because the SCC input value
11080 // for S_CSELECT* already has the same value that will be calculated by
11081 // s_cmp_lg_*
11082 //
11083 // s_cmp_[lg|eq]_* (S_CSELECT* (non-zero imm), 0), 0 => (S_CSELECT*
11084 // (non-zero imm), 0)
11085
11086 unsigned NewDefOpc = Def->getOpcode();
11087 if (!setsSCCIfResultIsNonZero(*Def) &&
11088 !setsSCCIfResultIsZero(*Def, NeedInversion, NewDefOpc) &&
11089 !foldableSelect(*Def))
11090 return false;
11091
11092 if (!optimizeSCC(Def, &CmpInstr, NeedInversion))
11093 return false;
11094
 // setsSCCIfResultIsZero may have requested an opcode change (I32 -> U32).
11095 if (NewDefOpc != Def->getOpcode())
11096 Def->setDesc(get(NewDefOpc));
11097
11098 // If s_or_b32 result, sY, is unused (i.e. it is effectively a 64-bit
11099 // s_cmp_lg of a register pair) and the inputs are the hi and lo-halves of a
11100 // 64-bit foldableSelect then delete s_or_b32 in the sequence:
11101 // sX = s_cselect_b64 (non-zero imm), 0
11102 // sLo = copy sX.sub0
11103 // sHi = copy sX.sub1
11104 // sY = s_or_b32 sLo, sHi
11105 if (Def->getOpcode() == AMDGPU::S_OR_B32 &&
11106 MRI->use_nodbg_empty(Def->getOperand(0).getReg())) {
11107 const MachineOperand &OrOpnd1 = Def->getOperand(1);
11108 const MachineOperand &OrOpnd2 = Def->getOperand(2);
11109 if (OrOpnd1.isReg() && OrOpnd2.isReg()) {
11110 MachineInstr *Def1 = MRI->getVRegDef(OrOpnd1.getReg());
11111 MachineInstr *Def2 = MRI->getVRegDef(OrOpnd2.getReg());
11112 if (Def1 && Def1->getOpcode() == AMDGPU::COPY && Def2 &&
11113 Def2->getOpcode() == AMDGPU::COPY && Def1->getOperand(1).isReg() &&
11114 Def2->getOperand(1).isReg() &&
11115 Def1->getOperand(1).getSubReg() == AMDGPU::sub0 &&
11116 Def2->getOperand(1).getSubReg() == AMDGPU::sub1 &&
11117 Def1->getOperand(1).getReg() == Def2->getOperand(1).getReg()) {
11118 MachineInstr *Select = MRI->getVRegDef(Def1->getOperand(1).getReg());
11119 if (Select && foldableSelect(*Select))
11120 optimizeSCC(Select, Def, /*NeedInversion=*/false);
11121 }
11122 }
11123 }
11124 return true;
11125 };
11126
11127 const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI,
11128 this](int64_t ExpectedValue, unsigned SrcSize,
11129 bool IsReversible, bool IsSigned) -> bool {
11130 // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
11131 // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
11132 // s_cmp_ge_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
11133 // s_cmp_ge_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
11134 // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 1 << n => s_and_b64 $src, 1 << n
11135 // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
11136 // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
11137 // s_cmp_gt_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
11138 // s_cmp_gt_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
11139 // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 0 => s_and_b64 $src, 1 << n
11140 //
11141 // Signed ge/gt are not used for the sign bit.
11142 //
11143 // If result of the AND is unused except in the compare:
11144 // s_and_b(32|64) $src, 1 << n => s_bitcmp1_b(32|64) $src, n
11145 //
11146 // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n
11147 // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n
11148 // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 0 => s_bitcmp0_b64 $src, n
11149 // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n
11150 // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n
11151 // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 1 << n => s_bitcmp0_b64 $src, n
11152
11153 MachineInstr *Def = MRI->getVRegDef(SrcReg);
11154 if (!Def)
11155 return false;
11156
11157 if (Def->getOpcode() != AMDGPU::S_AND_B32 &&
11158 Def->getOpcode() != AMDGPU::S_AND_B64)
11159 return false;
11160
 // Accept either AND operand as the single-bit mask.
11161 int64_t Mask;
11162 const auto isMask = [&Mask, SrcSize](const MachineOperand *MO) -> bool {
11163 if (MO->isImm())
11164 Mask = MO->getImm();
11165 else if (!getFoldableImm(MO, Mask))
11166 return false;
11167 Mask &= maxUIntN(SrcSize);
11168 return isPowerOf2_64(Mask);
11169 };
11170
11171 MachineOperand *SrcOp = &Def->getOperand(1);
11172 if (isMask(SrcOp))
11173 SrcOp = &Def->getOperand(2);
11174 else if (isMask(&Def->getOperand(2)))
11175 SrcOp = &Def->getOperand(1);
11176 else
11177 return false;
11178
11179 // A valid Mask is required to have a single bit set, hence a non-zero and
11180 // power-of-two value. This verifies that we will not do 64-bit shift below.
11181 assert(llvm::has_single_bit<uint64_t>(Mask) && "Invalid mask.");
11182 unsigned BitNo = llvm::countr_zero((uint64_t)Mask);
11183 if (IsSigned && BitNo == SrcSize - 1)
11184 return false;
11185
11186 ExpectedValue <<= BitNo;
11187
 // The compare may test the opposite polarity; only some compares allow it.
11188 bool IsReversedCC = false;
11189 if (CmpValue != ExpectedValue) {
11190 if (!IsReversible)
11191 return false;
11192 IsReversedCC = CmpValue == (ExpectedValue ^ Mask);
11193 if (!IsReversedCC)
11194 return false;
11195 }
11196
11197 Register DefReg = Def->getOperand(0).getReg();
11198 if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg))
11199 return false;
11200
11201 if (!optimizeSCC(Def, &CmpInstr, /*NeedInversion=*/false))
11202 return false;
11203
11204 if (!MRI->use_nodbg_empty(DefReg)) {
11205 assert(!IsReversedCC);
11206 return true;
11207 }
11208
11209 // Replace AND with unused result with a S_BITCMP.
11210 MachineBasicBlock *MBB = Def->getParent();
11211
11212 unsigned NewOpc = (SrcSize == 32) ? IsReversedCC ? AMDGPU::S_BITCMP0_B32
11213 : AMDGPU::S_BITCMP1_B32
11214 : IsReversedCC ? AMDGPU::S_BITCMP0_B64
11215 : AMDGPU::S_BITCMP1_B64;
11216
11217 BuildMI(*MBB, Def, Def->getDebugLoc(), get(NewOpc))
11218 .add(*SrcOp)
11219 .addImm(BitNo);
11220 Def->eraseFromParent();
11221
11222 return true;
11223 };
11224
11225 switch (CmpInstr.getOpcode()) {
11226 default:
11227 break;
11228 case AMDGPU::S_CMP_EQ_U32:
11229 case AMDGPU::S_CMP_EQ_I32:
11230 case AMDGPU::S_CMPK_EQ_U32:
11231 case AMDGPU::S_CMPK_EQ_I32:
11232 return optimizeCmpAnd(1, 32, true, false) ||
11233 optimizeCmpSelect(/*NeedInversion=*/true);
11234 case AMDGPU::S_CMP_GE_U32:
11235 case AMDGPU::S_CMPK_GE_U32:
11236 return optimizeCmpAnd(1, 32, false, false);
11237 case AMDGPU::S_CMP_GE_I32:
11238 case AMDGPU::S_CMPK_GE_I32:
11239 return optimizeCmpAnd(1, 32, false, true);
11240 case AMDGPU::S_CMP_EQ_U64:
11241 return optimizeCmpAnd(1, 64, true, false);
11242 case AMDGPU::S_CMP_LG_U32:
11243 case AMDGPU::S_CMP_LG_I32:
11244 case AMDGPU::S_CMPK_LG_U32:
11245 case AMDGPU::S_CMPK_LG_I32:
11246 return optimizeCmpAnd(0, 32, true, false) ||
11247 optimizeCmpSelect(/*NeedInversion=*/false);
11248 case AMDGPU::S_CMP_GT_U32:
11249 case AMDGPU::S_CMPK_GT_U32:
11250 return optimizeCmpAnd(0, 32, false, false);
11251 case AMDGPU::S_CMP_GT_I32:
11252 case AMDGPU::S_CMPK_GT_I32:
11253 return optimizeCmpAnd(0, 32, false, true);
11254 case AMDGPU::S_CMP_LG_U64:
11255 return optimizeCmpAnd(0, 64, true, false) ||
11256 optimizeCmpSelect(/*NeedInversion=*/false);
11257 }
11258
11259 return false;
11260}
11261
// On subtargets requiring even-aligned VGPR tuples, wrap a 32-bit data
// operand into an aligned 64-bit REG_SEQUENCE (data in sub0, IMPLICIT_DEF in
// sub1) so the register allocator is forced to pick an aligned register.
// NOTE(review): the signature line and the declarations of MRI (original
// line 11277) and the Undef register (line 11280) were lost in extraction.
11263 AMDGPU::OpName OpName) const {
11264 if (!ST.needsAlignedVGPRs())
11265 return;
11266
11267 int OpNo = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
11268 if (OpNo < 0)
11269 return;
11270 MachineOperand &Op = MI.getOperand(OpNo);
 // Only 32-bit operands need the artificial alignment pair.
11271 if (getOpSize(MI, OpNo) > 4)
11272 return;
11273
11274 // Add implicit aligned super-reg to force alignment on the data operand.
11275 const DebugLoc &DL = MI.getDebugLoc();
11276 MachineBasicBlock *BB = MI.getParent();
11278 Register DataReg = Op.getReg();
11279 bool IsAGPR = RI.isAGPR(MRI, DataReg);
11281 IsAGPR ? &AMDGPU::AGPR_32RegClass : &AMDGPU::VGPR_32RegClass);
11282 BuildMI(*BB, MI, DL, get(AMDGPU::IMPLICIT_DEF), Undef);
11283 Register NewVR =
11284 MRI.createVirtualRegister(IsAGPR ? &AMDGPU::AReg_64_Align2RegClass
11285 : &AMDGPU::VReg_64_Align2RegClass);
11286 BuildMI(*BB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewVR)
11287 .addReg(DataReg, {}, Op.getSubReg())
11288 .addImm(AMDGPU::sub0)
11289 .addReg(Undef)
11290 .addImm(AMDGPU::sub1);
 // Rewrite the operand to the aligned pair and keep the pair alive via an
 // implicit use.
11291 Op.setReg(NewVR);
11292 Op.setSubReg(AMDGPU::sub0);
11293 MI.addOperand(MachineOperand::CreateReg(NewVR, false, true));
11294}
11295
// IGLP pseudos are excluded from whatever property this predicate reports.
// NOTE(review): the signature (and the final return, original line 11300)
// were lost in extraction -- presumably a TargetInstrInfo query that
// delegates to the base class for non-IGLP instructions; confirm upstream.
11297 if (isIGLP(*MI))
11298 return false;
11299
11301}
11302
// WMMA/SWMMAC instructions count as XDL; on GFX1250 a per-opcode table
// decides, otherwise all of them qualify.
// NOTE(review): the signature line was lost in extraction.
11304 if (!isWMMA(MI) && !isSWMMAC(MI))
11305 return false;
11306
11307 if (ST.hasGFX1250Insts())
11308 return AMDGPU::getWMMAIsXDL(MI.getOpcode());
11309
11310 return true;
11311}
11312
// Whether MI executes on the XDL (matrix) pipeline: on GFX12+ this is DOT or
// XDL WMMA; otherwise MAI instructions except DGEMM and the accvgpr
// read/write moves, refined by a per-opcode table on GFX940.
// NOTE(review): the signature line was lost in extraction.
11314 unsigned Opcode = MI.getOpcode();
11315
11316 if (AMDGPU::isGFX12Plus(ST))
11317 return isDOT(MI) || isXDLWMMA(MI);
11318
11319 if (!isMAI(MI) || isDGEMM(Opcode) ||
11320 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
11321 Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64)
11322 return false;
11323
 // Pre-GFX940 MAI instructions are all XDL.
11324 if (!ST.hasGFX940Insts())
11325 return true;
11326
11327 return AMDGPU::getMAIIsGFX940XDL(Opcode);
11328}
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Contains the definition of a TargetInstrInfo class that is common to all AMD GPUs.
AMDGPU Register Bank Select
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
AMD GCN specific subclass of TargetSubtarget.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static bool isUndef(const MachineInstr &MI)
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t High
uint64_t IntrinsicInst * II
#define P(N)
R600 Clause Merge
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static cl::opt< bool > Fix16BitCopies("amdgpu-fix-16-bit-physreg-copies", cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"), cl::init(true), cl::ReallyHidden)
static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RC, bool Forward)
static unsigned getNewFMAInst(const GCNSubtarget &ST, unsigned Opc)
static void indirectCopyToAGPR(const SIInstrInfo &TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, RegScavenger &RS, bool RegsOverlap, Register ImpDefSuperReg=Register(), Register ImpUseSuperReg=Register())
Handle copying from SGPR to AGPR, or from AGPR to AGPR on GFX908.
static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize)
static bool compareMachineOp(const MachineOperand &Op0, const MachineOperand &Op1)
static bool isStride64(unsigned Opc)
#define GENERATE_RENAMED_GFX9_CASES(OPCODE)
static std::tuple< unsigned, unsigned > extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc)
static bool followSubRegDef(MachineInstr &MI, TargetInstrInfo::RegSubRegPair &RSR)
static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize)
static MachineInstr * swapImmOperands(MachineInstr &MI, MachineOperand &NonRegOp1, MachineOperand &NonRegOp2)
static MachineBasicBlock * loadScalarOperandsFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, ArrayRef< MachineOperand * > ScalarOps, MachineDominatorTree *MDT, MachineBasicBlock::iterator Begin=nullptr, MachineBasicBlock::iterator End=nullptr)
static void copyFlagsToImplicitVCC(MachineInstr &MI, const MachineOperand &Orig)
static void emitLoadScalarOpsFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, MachineBasicBlock &LoopBB, MachineBasicBlock &BodyBB, const DebugLoc &DL, ArrayRef< MachineOperand * > ScalarOps)
static bool offsetsDoNotOverlap(LocationSize WidthA, int OffsetA, LocationSize WidthB, int OffsetB)
static unsigned getWWMRegSpillSaveOpcode(unsigned Size, bool IsVectorSuperClass)
static bool memOpsHaveSameBaseOperands(ArrayRef< const MachineOperand * > BaseOps1, ArrayRef< const MachineOperand * > BaseOps2)
static unsigned getWWMRegSpillRestoreOpcode(unsigned Size, bool IsVectorSuperClass)
static bool setsSCCIfResultIsZero(const MachineInstr &Def, bool &NeedInversion, unsigned &NewDefOpc)
static bool isSCCDeadOnExit(MachineBasicBlock *MBB)
static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI, int64_t &Imm, MachineInstr **DefMI=nullptr)
static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize)
static unsigned subtargetEncodingFamily(const GCNSubtarget &ST)
static void preserveCondRegFlags(MachineOperand &CondReg, const MachineOperand &OrigCond)
static Register findImplicitSGPRRead(const MachineInstr &MI)
static unsigned getNewFMAAKInst(const GCNSubtarget &ST, unsigned Opc)
static cl::opt< unsigned > BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), cl::desc("Restrict range of branch instructions (DEBUG)"))
static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, MachineInstr &NewMI)
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static unsigned getSGPRSpillRestoreOpcode(unsigned Size)
static bool isRegOrFI(const MachineOperand &MO)
static unsigned getSGPRSpillSaveOpcode(unsigned Size)
static constexpr AMDGPU::OpName ModifierOpNames[]
static unsigned getVGPRSpillSaveOpcode(unsigned Size)
static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, const char *Msg="illegal VGPR to SGPR copy")
static MachineInstr * swapRegAndNonRegOperand(MachineInstr &MI, MachineOperand &RegOp, MachineOperand &NonRegOp)
static bool shouldReadExec(const MachineInstr &MI)
static unsigned getNewFMAMKInst(const GCNSubtarget &ST, unsigned Opc)
static bool isRenamedInGFX9(int Opcode)
static TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd)
static bool changesVGPRIndexingMode(const MachineInstr &MI)
static bool isSubRegOf(const SIRegisterInfo &TRI, const MachineOperand &SuperVec, const MachineOperand &SubReg)
static bool foldableSelect(const MachineInstr &Def)
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, AMDGPU::OpName OpName)
Returns true if both nodes have the same value for the given operand Op, or if both nodes do not have...
static unsigned getAVSpillSaveOpcode(unsigned Size)
static unsigned getNumOperandsNoGlue(SDNode *Node)
static bool canRemat(const MachineInstr &MI)
static unsigned getAVSpillRestoreOpcode(unsigned Size)
static unsigned getVGPRSpillRestoreOpcode(unsigned Size)
Interface definition for SIInstrInfo.
bool IsDead
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const LaneMaskConstants & get(const GCNSubtarget &ST)
static LLVM_ABI Semantics SemanticsToEnum(const llvm::fltSemantics &Sem)
Definition APFloat.cpp:145
Class for arbitrary precision integers.
Definition APInt.h:78
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
uint64_t getZExtValue() const
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
void changeImmediateDominator(DomTreeNodeBase< NodeT > *N, DomTreeNodeBase< NodeT > *NewIDom)
changeImmediateDominator - This method is used to update the dominator tree information when a node's...
DomTreeNodeBase< NodeT > * addNewBlock(NodeT *BB, NodeT *DomBB)
Add a new node to the dominator tree information.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
CycleT * getCycle(const BlockT *Block) const
Find the innermost cycle containing a given block.
void getExitingBlocks(SmallVectorImpl< BlockT * > &TmpStorage) const
Return all blocks of this cycle that have successor outside of this cycle.
bool contains(const BlockT *Block) const
Return whether Block is contained in the cycle.
const GenericCycle * getParentCycle() const
Itinerary data supplied by a subtarget to be used by a target.
constexpr unsigned getAddressSpace() const
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LiveInterval - This class represents the liveness of a register, or stack slot.
bool hasInterval(Register Reg) const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
LiveInterval & getInterval(Register Reg)
LLVM_ABI bool shrinkToUses(LiveInterval *li, SmallVectorImpl< MachineInstr * > *dead=nullptr)
After removing some uses of a register, shrink its live range to just the remaining uses.
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
This class represents the liveness of a register, stack slot, etc.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
bool hasValue() const
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
static const MCBinaryExpr * createAnd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:348
static const MCBinaryExpr * createAShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:418
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
unsigned getOpcode() const
Return the opcode number for this descriptor.
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
uint8_t OperandType
Information about the type of the operand.
Definition MCInstrDesc.h:98
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition MCInstrDesc.h:92
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void setVariableValue(const MCExpr *Value)
Definition MCSymbol.cpp:50
Helper class for constructing bundles of MachineInstrs.
MachineBasicBlock::instr_iterator begin() const
Return an iterator to the first bundled instruction.
MIBundleBuilder & append(MachineInstr *MI)
Insert MI into MBB by appending it to the instructions in the bundle.
MIRFormater - Interface to format MIR operand based on target.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
bool isCopy() const
const MachineBasicBlock * getParent() const
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
bool isBundle() const
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
mop_range implicit_operands()
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mop_range explicit_operands()
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
bool isMoveImmediate(QueryType Type=IgnoreBundle) const
Return true if this instruction is a move immediate (including conditional moves) instruction.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
LLVM_ABI void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
LLVM_ABI void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
LLVM_ABI unsigned getOperandNo() const
Returns the index of this operand in the instruction that it belongs to.
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
LLVM_ABI void ChangeToFrameIndex(int Idx, unsigned TargetFlags=0)
Replace this operand with a frame index.
void setImm(int64_t immVal)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
LLVM_ABI void ChangeToGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
ChangeToGA - Replace this operand with a new global address operand.
void setIsKill(bool Val=true)
LLVM_ABI void ChangeToRegister(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void setOffset(int64_t Offset)
unsigned getTargetFlags() const
static MachineOperand CreateImm(int64_t Val)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isTargetIndex() const
isTargetIndex - Tests if this is a MO_TargetIndex operand.
void setTargetFlags(unsigned F)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
LLVM_ABI void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps)
Move NumOps operands from Src to Dst, updating use-def lists as needed.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
bool reservedRegsFrozen() const
reservedRegsFrozen - Returns true after freezeReservedRegs() was called to ensure the set of reserved...
LLVM_ABI void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
void setRegAllocationHint(Register VReg, unsigned Type, Register PrefReg)
setRegAllocationHint - Specify a register allocation hint for the specified virtual register.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
void setSimpleHint(Register VReg, Register PrefReg)
Specify the preferred (target independent) register allocation hint for the specified virtual registe...
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
iterator_range< use_iterator > use_operands(Register Reg) const
LLVM_ABI void removeRegOperandFromUseList(MachineOperand *MO)
Remove MO from its use-def list.
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI void addRegOperandToUseList(MachineOperand *MO)
Add MO to the linked list of operands for its register.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition Register.h:107
constexpr bool isValid() const
Definition Register.h:112
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isLegalMUBUFImmOffset(unsigned Imm) const
bool isInlineConstant(const APInt &Imm) const
void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const
Fix operands in MI to satisfy constant bus requirements.
bool canAddToBBProlog(const MachineInstr &MI) const
static bool isDS(const MachineInstr &MI)
MachineBasicBlock * legalizeOperands(MachineInstr &MI, MachineDominatorTree *MDT=nullptr) const
Legalize all operands in this instruction.
bool areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, int64_t &Offset0, int64_t &Offset1) const override
unsigned getLiveRangeSplitOpcode(Register Reg, const MachineFunction &MF) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const final
Register isSGPRStackAccess(const MachineInstr &MI, int &FrameIndex) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
static bool isNeverUniform(const MachineInstr &MI)
bool isXDLWMMA(const MachineInstr &MI) const
bool isBasicBlockPrologue(const MachineInstr &MI, Register Reg=Register()) const override
bool isSpill(uint32_t Opcode) const
uint64_t getDefaultRsrcDataFormat() const
static bool isSOPP(const MachineInstr &MI)
InstructionUniformity getGenericInstructionUniformity(const MachineInstr &MI) const
bool mayAccessScratch(const MachineInstr &MI) const
bool isIGLP(unsigned Opcode) const
static bool isFLATScratch(const MachineInstr &MI)
const MCInstrDesc & getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize, bool IsSGPR) const
MachineInstrBuilder getAddNoCarry(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg) const
Return a partially built integer add instruction without carry.
bool mayAccessFlatAddressSpace(const MachineInstr &MI) const
bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0, int64_t Offset1, unsigned NumLoads) const override
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset, Align Alignment=Align(4)) const
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
void moveToVALU(SIInstrWorklist &Worklist, MachineDominatorTree *MDT) const
Replace the instructions opcode with the equivalent VALU opcode.
static bool isSMRD(const MachineInstr &MI)
void restoreExec(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, SlotIndexes *Indexes=nullptr) const
bool usesConstantBus(const MachineRegisterInfo &MRI, const MachineOperand &MO, const MCOperandInfo &OpInfo) const
Returns true if this operand uses the constant bus.
static unsigned getMaxMUBUFImmOffset(const GCNSubtarget &ST)
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static unsigned getFoldableCopySrcIdx(const MachineInstr &MI)
unsigned getOpSize(uint32_t Opcode, unsigned OpNo) const
Return the size in bytes of the operand OpNo on the given.
void legalizeOperandsFLAT(MachineRegisterInfo &MRI, MachineInstr &MI) const
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
static std::optional< int64_t > extractSubregFromImm(int64_t ImmVal, unsigned SubRegIndex)
Return the extracted immediate value in a subregister use from a constant materialized in a super reg...
Register isStackAccess(const MachineInstr &MI, int &FrameIndex) const
static bool isMTBUF(const MachineInstr &MI)
const MCInstrDesc & getIndirectGPRIDXPseudo(unsigned VecSize, bool IsIndirectSrc) const
void insertReturn(MachineBasicBlock &MBB) const
static bool isDGEMM(unsigned Opcode)
static bool isEXP(const MachineInstr &MI)
static bool isSALU(const MachineInstr &MI)
static bool setsSCCIfResultIsNonZero(const MachineInstr &MI)
const MIRFormatter * getMIRFormatter() const override
void legalizeGenericOperand(MachineBasicBlock &InsertMBB, MachineBasicBlock::iterator I, const TargetRegisterClass *DstRC, MachineOperand &Op, MachineRegisterInfo &MRI, const DebugLoc &DL) const
MachineInstr * buildShrunkInst(MachineInstr &MI, unsigned NewOpcode) const
unsigned getInstBundleSize(const MachineInstr &MI) const
static bool isVOP2(const MachineInstr &MI)
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
static bool isSDWA(const MachineInstr &MI)
InstructionUniformity getInstructionUniformity(const MachineInstr &MI) const final
const MCInstrDesc & getKillTerminatorFromPseudo(unsigned Opcode) const
void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const override
static bool isGather4(const MachineInstr &MI)
MachineInstr * getWholeWaveFunctionSetup(MachineFunction &MF) const
bool isLegalVSrcOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO would be a valid operand for the given operand definition OpInfo.
static bool isDOT(const MachineInstr &MI)
MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const override
bool hasModifiers(unsigned Opcode) const
Return true if this instruction has any modifiers.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
static bool isSWMMAC(const MachineInstr &MI)
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
bool isWave32() const
bool isHighLatencyDef(int Opc) const override
void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const
Legalize the OpIndex operand of this instruction by inserting a MOV.
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
static bool isVOPC(const MachineInstr &MI)
void removeModOperands(MachineInstr &MI) const
std::pair< int64_t, int64_t > splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace, uint64_t FlatVariant) const
Split COffsetVal into {immediate offset field, remainder offset} values.
unsigned getVectorRegSpillRestoreOpcode(Register Reg, const TargetRegisterClass *RC, unsigned Size, const SIMachineFunctionInfo &MFI) const
bool isXDL(const MachineInstr &MI) const
static bool isVIMAGE(const MachineInstr &MI)
void enforceOperandRCAlignment(MachineInstr &MI, AMDGPU::OpName OpName) const
static bool isSOP2(const MachineInstr &MI)
static bool isGWS(const MachineInstr &MI)
bool isLegalAV64PseudoImm(uint64_t Imm) const
Check if this immediate value can be used for AV_MOV_B64_IMM_PSEUDO.
bool isNeverCoissue(MachineInstr &MI) const
static bool isBUF(const MachineInstr &MI)
bool hasModifiersSet(const MachineInstr &MI, AMDGPU::OpName OpName) const
const TargetRegisterClass * getPreferredSelectRegClass(unsigned Size) const
bool isLegalToSwap(const MachineInstr &MI, unsigned fromIdx, unsigned toIdx) const
static bool isFLATGlobal(const MachineInstr &MI)
bool isGlobalMemoryObject(const MachineInstr *MI) const override
static bool isVSAMPLE(const MachineInstr &MI)
bool isBufferSMRD(const MachineInstr &MI) const
static bool isKillTerminator(unsigned Opcode)
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx0, unsigned &SrcOpIdx1) const override
void insertScratchExecCopy(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, bool IsSCCLive, SlotIndexes *Indexes=nullptr) const
bool hasVALU32BitEncoding(unsigned Opcode) const
Return true if this 64-bit VALU instruction has a 32-bit encoding.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig) const override
unsigned getMovOpcode(const TargetRegisterClass *DstRC) const
unsigned buildExtractSubReg(MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI, const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC, unsigned SubIdx, const TargetRegisterClass *SubRC) const
void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const
Legalize operands in MI by either commuting it or inserting a copy of src1.
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const final
static bool isTRANS(const MachineInstr &MI)
static bool isImage(const MachineInstr &MI)
static bool isSOPK(const MachineInstr &MI)
const TargetRegisterClass * getOpRegClass(const MachineInstr &MI, unsigned OpNo) const
Return the correct register class for OpNo.
MachineBasicBlock * insertSimulatedTrap(MachineRegisterInfo &MRI, MachineBasicBlock &MBB, MachineInstr &MI, const DebugLoc &DL) const
Build instructions that simulate the behavior of a s_trap 2 instructions for hardware (namely,...
static unsigned getNonSoftWaitcntOpcode(unsigned Opcode)
static unsigned getDSShaderTypeValue(const MachineFunction &MF)
static bool isFoldableCopy(const MachineInstr &MI)
bool mayAccessLDSThroughFlat(const MachineInstr &MI) const
bool isIgnorableUse(const MachineOperand &MO) const override
static bool isMUBUF(const MachineInstr &MI)
bool expandPostRAPseudo(MachineInstr &MI) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
static bool isSegmentSpecificFLAT(const MachineInstr &MI)
bool isReMaterializableImpl(const MachineInstr &MI) const override
static bool isVOP3(const MCInstrDesc &Desc)
bool physRegUsesConstantBus(const MachineOperand &Reg) const
static bool isF16PseudoScalarTrans(unsigned Opcode)
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const
static bool isDPP(const MachineInstr &MI)
bool analyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const
static bool isMFMA(const MachineInstr &MI)
bool isLowLatencyInstruction(const MachineInstr &MI) const
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to ano...
void mutateAndCleanupImplicit(MachineInstr &MI, const MCInstrDesc &NewDesc) const
static bool isMAI(const MCInstrDesc &Desc)
static bool usesLGKM_CNT(const MachineInstr &MI)
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void legalizeOperandsVALUt16(MachineInstr &Inst, MachineRegisterInfo &MRI) const
Fix operands in Inst to fix 16bit SALU to VALU lowering.
void moveToVALUImpl(SIInstrWorklist &Worklist, MachineDominatorTree *MDT, MachineInstr &Inst) const
bool isImmOperandLegal(const MCInstrDesc &InstDesc, unsigned OpNo, const MachineOperand &MO) const
bool canShrink(const MachineInstr &MI, const MachineRegisterInfo &MRI) const
const MachineOperand & getCalleeOperand(const MachineInstr &MI) const override
bool isAsmOnlyOpcode(int MCOp) const
Check if this instruction should only be used by assembler.
bool isAlwaysGDS(uint32_t Opcode) const
static bool isVGPRSpill(const MachineInstr &MI)
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
This is used by the post-RA scheduler (SchedulePostRAList.cpp).
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, uint64_t FlatVariant) const
Returns if Offset is legal for the subtarget as the offset to a FLAT encoded instruction with the giv...
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
int64_t getNamedImmOperand(const MachineInstr &MI, AMDGPU::OpName OperandName) const
Get required immediate operand.
ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const override
bool regUsesConstantBus(const MachineOperand &Reg, const MachineRegisterInfo &MRI) const
static bool isMIMG(const MachineInstr &MI)
MachineOperand buildExtractSubRegOrImm(MachineBasicBlock::iterator MI, MachineRegisterInfo &MRI, const MachineOperand &SuperReg, const TargetRegisterClass *SuperRC, unsigned SubIdx, const TargetRegisterClass *SubRC) const
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
bool isLegalRegOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO (a register operand) is a legal register for the given operand description or operand ind...
bool allowNegativeFlatOffset(uint64_t FlatVariant) const
Returns true if negative offsets are allowed for the given FlatVariant.
static unsigned getNumWaitStates(const MachineInstr &MI)
Return the number of wait states that result from executing this instruction.
unsigned getVectorRegSpillSaveOpcode(Register Reg, const TargetRegisterClass *RC, unsigned Size, const SIMachineFunctionInfo &MFI) const
unsigned getVALUOp(const MachineInstr &MI) const
static bool modifiesModeRegister(const MachineInstr &MI)
Return true if the instruction modifies the mode register.
Register readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI, const TargetRegisterClass *DstRC=nullptr) const
Copy a value from a VGPR (SrcReg) to SGPR.
bool hasDivergentBranch(const MachineBasicBlock *MBB) const
Return whether the block terminate with divergent branch.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void fixImplicitOperands(MachineInstr &MI) const
bool moveFlatAddrToVGPR(MachineInstr &Inst) const
Change SADDR form of a FLAT Inst to its VADDR form if saddr operand was moved to VGPR.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0, AMDGPU::OpName Src0OpName, MachineOperand &Src1, AMDGPU::OpName Src1OpName) const
Register insertNE(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register SrcReg, int Value) const
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
bool hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const
This function is used to determine if an instruction can be safely executed under EXEC = 0 without ha...
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
static bool isAtomic(const MachineInstr &MI)
bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const override
bool isLiteralOperandLegal(const MCInstrDesc &InstDesc, const MCOperandInfo &OpInfo) const
static bool isWWMRegSpillOpcode(uint32_t Opcode)
static bool sopkIsZext(unsigned Opcode)
static bool isSGPRSpill(const MachineInstr &MI)
static bool isWMMA(const MachineInstr &MI)
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
bool mayReadEXEC(const MachineRegisterInfo &MRI, const MachineInstr &MI) const
Returns true if the instruction could potentially depend on the value of exec.
void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
void insertVectorSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
std::pair< MachineInstr *, MachineInstr * > expandMovDPP64(MachineInstr &MI) const
Register insertEQ(MachineBasicBlock *MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register SrcReg, int Value) const
static bool isSOPC(const MachineInstr &MI)
static bool isFLAT(const MachineInstr &MI)
static bool isVALU(const MachineInstr &MI)
bool isBarrier(unsigned Opcode) const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx0, unsigned OpIdx1) const override
int pseudoToMCOpcode(int Opcode) const
Return a target-specific opcode if Opcode is a pseudo instruction.
const MCInstrDesc & getMCOpcodeFromPseudo(unsigned Opcode) const
Return the descriptor of the target-specific machine instruction that corresponds to the specified ps...
bool isLegalGFX12PlusPackedMathFP32Operand(const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN, const MachineOperand *MO=nullptr) const
Check if MO would be a legal operand for gfx12+ packed math FP32 instructions.
static bool usesVM_CNT(const MachineInstr &MI)
MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const override
static bool isFixedSize(const MachineInstr &MI)
bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo, MachineCycleInfo *CI) const override
LLVM_READONLY int commuteOpcode(unsigned Opc) const
uint64_t getScratchRsrcWords23() const
LLVM_READONLY MachineOperand * getNamedOperand(MachineInstr &MI, AMDGPU::OpName OperandName) const
Returns the operand named Op.
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx, const MachineOperand *MO=nullptr) const
Check if MO is a legal operand if it was the OpIdx Operand for MI.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
std::optional< int64_t > getImmOrMaterializedImm(MachineOperand &Op) const
static bool isLDSDMA(const MachineInstr &MI)
static bool isVOP1(const MachineInstr &MI)
SIInstrInfo(const GCNSubtarget &ST)
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool hasAnyModifiersSet(const MachineInstr &MI) const
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
void setHasSpilledVGPRs(bool Spill=true)
bool isWWMReg(Register Reg) const
bool checkFlag(Register Reg, uint8_t Flag) const
void setHasSpilledSGPRs(bool Spill=true)
unsigned getScratchReservedForDynamicVGPRs() const
static unsigned getSubRegFromChannel(unsigned Channel, unsigned NumRegs=1)
ArrayRef< int16_t > getRegSplitParts(const TargetRegisterClass *RC, unsigned EltSize) const
unsigned getHWRegIndex(MCRegister Reg) const
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
unsigned getChannelFromSubReg(unsigned SubReg) const
static bool isAGPRClass(const TargetRegisterClass *RC)
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
MachineFunction & MF
Machine function.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
SlotIndexes pass.
SlotIndex insertMachineInstrInMaps(MachineInstr &MI, bool Late=false)
Insert the given machine instruction into the mapping.
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition DenseSet.h:291
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
int64_t getImm() const
Register getReg() const
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual const MachineOperand & getCalleeOperand(const MachineInstr &MI) const
Returns the callee operand from the given MI.
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool isGlobalMemoryObject(const MachineInstr *MI) const
Returns true if MI is an instruction we are unable to reason about (like a call or something with unm...
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition DenseSet.h:180
self_iterator getIterator()
Definition ilist_node.h:123
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst)
bool isPackedFP32Inst(unsigned Opc)
bool isInlinableLiteralBF16(int16_t Literal, bool HasInv2Pi)
const uint64_t RSRC_DATA_FORMAT
bool isPKFMACF16InlineConstant(uint32_t Literal, bool IsGFX11Plus)
LLVM_READONLY const MIMGInfo * getMIMGInfo(unsigned Opc)
bool isInlinableLiteralFP16(int16_t Literal, bool HasInv2Pi)
bool getWMMAIsXDL(unsigned Opc)
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc)
bool isInlinableLiteralV2I16(uint32_t Literal)
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
bool isInlinableLiteralV2BF16(uint32_t Literal)
LLVM_READONLY int32_t getCommuteRev(uint32_t Opcode)
LLVM_READONLY int32_t getCommuteOrig(uint32_t Opcode)
unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST)
For pre-GFX12 FLAT instructions the offset must be positive; MSB is ignored and forced to zero.
bool isGFX12Plus(const MCSubtargetInfo &STI)
bool isInlinableLiteralV2F16(uint32_t Literal)
bool isValid32BitLiteral(uint64_t Val, bool IsFP64)
LLVM_READONLY int32_t getGlobalVaddrOp(uint32_t Opcode)
LLVM_READNONE bool isLegalDPALU_DPPControl(const MCSubtargetInfo &ST, unsigned DC)
LLVM_READONLY int32_t getMFMAEarlyClobberOp(uint32_t Opcode)
bool getMAIIsGFX940XDL(unsigned Opc)
const uint64_t RSRC_ELEMENT_SIZE_SHIFT
bool isIntrinsicAlwaysUniform(unsigned IntrID)
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx)
LLVM_READONLY int32_t getIfAddr64Inst(uint32_t Opcode)
Check if Opcode is an Addr64 opcode.
LLVM_READONLY const MIMGDimInfo * getMIMGDimInfoByEncoding(uint8_t DimEnc)
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
const uint64_t RSRC_TID_ENABLE
LLVM_READONLY int32_t getVOPe32(uint32_t Opcode)
bool isIntrinsicSourceOfDivergence(unsigned IntrID)
constexpr bool isSISrcOperand(const MCOperandInfo &OpInfo)
Is this an AMDGPU specific source operand?
bool isGenericAtomic(unsigned Opc)
LLVM_READNONE bool isInlinableIntLiteral(int64_t Literal)
Is this literal inlinable, and not one of the values intended for floating point values.
unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode, const MIMGDimInfo *Dim, bool IsA16, bool IsG16Supported)
LLVM_READONLY int32_t getAddr64Inst(uint32_t Opcode)
int32_t getMCOpcode(uint32_t Opcode, unsigned Gen)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition SIDefines.h:234
@ OPERAND_REG_IMM_INT64
Definition SIDefines.h:204
@ OPERAND_REG_IMM_V2FP16
Definition SIDefines.h:211
@ OPERAND_REG_INLINE_C_FP64
Definition SIDefines.h:225
@ OPERAND_REG_INLINE_C_BF16
Definition SIDefines.h:222
@ OPERAND_REG_INLINE_C_V2BF16
Definition SIDefines.h:227
@ OPERAND_REG_IMM_V2INT16
Definition SIDefines.h:213
@ OPERAND_REG_IMM_BF16
Definition SIDefines.h:208
@ OPERAND_REG_IMM_INT32
Operands with register, 32-bit, or 64-bit immediate.
Definition SIDefines.h:203
@ OPERAND_REG_IMM_V2BF16
Definition SIDefines.h:210
@ OPERAND_REG_IMM_FP16
Definition SIDefines.h:209
@ OPERAND_REG_IMM_V2FP16_SPLAT
Definition SIDefines.h:212
@ OPERAND_REG_INLINE_C_INT64
Definition SIDefines.h:221
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
Definition SIDefines.h:219
@ OPERAND_REG_IMM_NOINLINE_V2FP16
Definition SIDefines.h:214
@ OPERAND_REG_IMM_FP64
Definition SIDefines.h:207
@ OPERAND_REG_INLINE_C_V2FP16
Definition SIDefines.h:228
@ OPERAND_REG_INLINE_AC_INT32
Operands with an AccVGPR register or inline constant.
Definition SIDefines.h:239
@ OPERAND_REG_INLINE_AC_FP32
Definition SIDefines.h:240
@ OPERAND_REG_IMM_V2INT32
Definition SIDefines.h:215
@ OPERAND_SDWA_VOPC_DST
Definition SIDefines.h:251
@ OPERAND_REG_IMM_FP32
Definition SIDefines.h:206
@ OPERAND_REG_INLINE_C_FP32
Definition SIDefines.h:224
@ OPERAND_REG_INLINE_C_INT32
Definition SIDefines.h:220
@ OPERAND_REG_INLINE_C_V2INT16
Definition SIDefines.h:226
@ OPERAND_INLINE_C_AV64_PSEUDO
Definition SIDefines.h:245
@ OPERAND_REG_IMM_V2FP32
Definition SIDefines.h:216
@ OPERAND_REG_INLINE_AC_FP64
Definition SIDefines.h:241
@ OPERAND_REG_INLINE_C_FP16
Definition SIDefines.h:223
@ OPERAND_REG_IMM_INT16
Definition SIDefines.h:205
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition SIDefines.h:231
LLVM_READONLY int32_t getBasicFromSDWAOp(uint32_t Opcode)
bool isDPALU_DPP(const MCInstrDesc &OpDesc, const MCInstrInfo &MII, const MCSubtargetInfo &ST)
@ TI_SCRATCH_RSRC_DWORD1
Definition AMDGPU.h:598
@ TI_SCRATCH_RSRC_DWORD3
Definition AMDGPU.h:600
@ TI_SCRATCH_RSRC_DWORD0
Definition AMDGPU.h:597
@ TI_SCRATCH_RSRC_DWORD2
Definition AMDGPU.h:599
@ TI_CONSTDATA_START
Definition AMDGPU.h:596
unsigned getRegBitWidth(const TargetRegisterClass &RC)
Get the size in bits of a register from the register class RC.
bool supportsScaleOffset(const MCInstrInfo &MII, unsigned Opcode)
const uint64_t RSRC_INDEX_STRIDE_SHIFT
LLVM_READONLY const MIMGBaseOpcodeInfo * getMIMGBaseOpcodeInfo(unsigned BaseOpcode)
LLVM_READONLY int32_t getFlatScratchInstSVfromSS(uint32_t Opcode)
bool isInlinableLiteralI16(int32_t Literal, bool HasInv2Pi)
LLVM_READNONE constexpr bool isGraphics(CallingConv::ID CC)
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ OPERAND_GENERIC_4
Definition MCInstrDesc.h:71
@ OPERAND_GENERIC_2
Definition MCInstrDesc.h:69
@ OPERAND_GENERIC_1
Definition MCInstrDesc.h:68
@ OPERAND_GENERIC_3
Definition MCInstrDesc.h:70
@ OPERAND_IMMEDIATE
Definition MCInstrDesc.h:61
@ OPERAND_GENERIC_0
Definition MCInstrDesc.h:67
@ OPERAND_GENERIC_5
Definition MCInstrDesc.h:72
Not(const Pred &P) -> Not< Pred >
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
LLVM_ABI void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...
TargetInstrInfo::RegSubRegPair getRegSubRegPair(const MachineOperand &O)
Create RegSubRegPair from a register MachineOperand.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
constexpr uint64_t maxUIntN(uint64_t N)
Gets the maximum value for a N-bit unsigned integer.
Definition MathExtras.h:207
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
bool execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI, Register VReg, const MachineInstr &DefMI, const MachineInstr &UseMI)
Return false if EXEC is not changed between the def of VReg at DefMI and the use at UseMI.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Dead
Unused definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
constexpr RegState getKillRegState(bool B)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
Definition MathExtras.h:546
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
Op::Description Desc
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition bit.h:154
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
TargetInstrInfo::RegSubRegPair getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg)
Return the SubReg component from REG_SEQUENCE.
static const MachineMemOperand::Flags MONoClobber
Mark the MMO of a uniform load if there are no potentially clobbering stores on any path from the sta...
Definition SIInstrInfo.h:44
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
MachineInstr * getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P, const MachineRegisterInfo &MRI)
Return the defining instruction for a given reg:subreg pair skipping copy like instructions and subre...
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition MathExtras.h:150
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
Definition MathExtras.h:155
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI VirtRegInfo AnalyzeVirtRegInBundle(MachineInstr &MI, Register Reg, SmallVectorImpl< std::pair< MachineInstr *, unsigned > > *Ops=nullptr)
AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses a virtual register.
static const MachineMemOperand::Flags MOCooperative
Mark the MMO of cooperative load/store atomics.
Definition SIInstrInfo.h:52
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:189
@ Xor
Bitwise or logical XOR of integers.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned DefaultMemoryClusterDWordsLimit
Definition SIInstrInfo.h:40
constexpr unsigned BitWidth
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition MathExtras.h:248
static const MachineMemOperand::Flags MOLastUse
Mark the MMO of a load as the last use.
Definition SIInstrInfo.h:48
constexpr T reverseBits(T Val)
Reverse the bits in Val.
Definition MathExtras.h:118
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition MathExtras.h:572
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
Definition MathExtras.h:77
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition Uniformity.h:18
@ AlwaysUniform
The result values are always uniform.
Definition Uniformity.h:23
@ NeverUniform
The result values can never be assumed to be uniform.
Definition Uniformity.h:26
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
constexpr RegState getUndefRegState(bool B)
GenericCycleInfo< MachineSSAContext > MachineCycleInfo
MachineCycleInfo::CycleT MachineCycle
static const MachineMemOperand::Flags MOThreadPrivate
Mark the MMO of accesses to memory locations that are never written to by other threads.
Definition SIInstrInfo.h:57
bool execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, Register VReg, const MachineInstr &DefMI)
Return false if EXEC is not changed between the def of VReg at DefMI and all its uses.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
Helper struct for the implementation of 3-address conversion to communicate updates made to instructi...
MachineInstr * RemoveMIUse
Other instruction whose def is no longer used by the converted instruction.
static constexpr uint64_t encode(Fields... Values)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
SparseBitVector AliveBlocks
AliveBlocks - Set of blocks in which this value is alive completely through.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Utility to store machine instructions worklist.
Definition SIInstrInfo.h:61
MachineInstr * top() const
Definition SIInstrInfo.h:66
bool isDeferred(MachineInstr *MI)
SetVector< MachineInstr * > & getDeferredList()
Definition SIInstrInfo.h:85
void insert(MachineInstr *MI)
A pair composed of a register and a sub-register index.
VirtRegInfo - Information about a virtual register used by a set of operands.
bool Reads
Reads - One of the operands read the virtual register.
bool Writes
Writes - One of the operands writes the virtual register.