LLVM 23.0.0git
AArch64AsmBackend.cpp
Go to the documentation of this file.
1//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
15#include "llvm/MC/MCAssembler.h"
16#include "llvm/MC/MCContext.h"
22#include "llvm/MC/MCValue.h"
27using namespace llvm;
28
29namespace {
30
// NOTE(review): this listing is a text extraction of a Doxygen page. The
// number fused onto the front of each line is the original source line
// number; wherever that numbering skips (e.g. 73 and 76 below), a
// hyperlinked source line was dropped by extraction. Line 73 appears to be
// the base-class fallback `return MCAsmBackend::getFixupKindInfo(Kind);`
// and line 76 the assert bound `AArch64::NumTargetFixupKinds &&` — confirm
// against upstream LLVM before editing.
//
// AArch64AsmBackend: common assembler-backend base shared by the ELF, COFF
// and Mach-O subclasses defined later in this file. It records the target
// triple and endianness and supplies the table of target fixup kinds.
31class AArch64AsmBackend : public MCAsmBackend {
32protected:
 // Target triple; consulted for object-format-specific fixup behavior.
33 Triple TheTriple;
34
35public:
36 AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
37 : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
38 : llvm::endianness::big),
39 TheTriple(TT) {}
40
41
 // Maps a .reloc directive name to a fixup kind (defined out of line below).
42 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
43
 // Returns the {name, bit offset, bit size, flags} record for a fixup kind.
44 MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override {
45 const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
46 // This table *must* be in the order that the fixup_* kinds are defined
47 // in AArch64FixupKinds.h.
48 //
49 // Name Offset (bits) Size (bits) Flags
50 {"fixup_aarch64_pcrel_adr_imm21", 0, 32, 0},
51 {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, 0},
52 {"fixup_aarch64_add_imm12", 10, 12, 0},
53 {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
54 {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
55 {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
56 {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
57 {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
58 {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, 0},
59 {"fixup_aarch64_movw", 5, 16, 0},
60 {"fixup_aarch64_pcrel_branch9", 5, 9, 0},
61 {"fixup_aarch64_pcrel_branch14", 5, 14, 0},
62 {"fixup_aarch64_pcrel_branch16", 5, 16, 0},
63 {"fixup_aarch64_pcrel_branch19", 5, 19, 0},
64 {"fixup_aarch64_pcrel_branch26", 0, 26, 0},
65 {"fixup_aarch64_pcrel_call26", 0, 26, 0}};
66
67 // Fixup kinds from raw relocation types and .reloc directives force
68 // relocations and do not need these fields.
69 if (mc::isRelocation(Kind))
70 return {};
71
 // NOTE(review): the return for this branch (original line 73) was dropped.
72 if (Kind < FirstTargetFixupKind)
74
 // NOTE(review): the assert's upper-bound operand (original line 76) was
 // dropped.
75 assert(unsigned(Kind - FirstTargetFixupKind) <
77 "Invalid kind!");
78 return Infos[Kind - FirstTargetFixupKind];
79 }
80
 // Patches the fixup value into the fragment bytes (defined below).
81 void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
82 uint8_t *Data, uint64_t Value, bool IsResolved) override;
83
 // Emits padding as AArch64 NOPs (defined below).
84 bool writeNopData(raw_ostream &OS, uint64_t Count,
85 const MCSubtargetInfo *STI) const override;
86
 // Big-endian container size for a fixup, 0 if little-endian (defined below;
 // "Containere" typo is historical and preserved).
87 unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;
88};
89
90} // end anonymous namespace
91
92/// The number of bytes the fixup may change.
131
/// Pack a 21-bit ADR/ADRP immediate into the instruction's split immediate
/// fields: bits [1:0] of the value become immlo (instruction bits 30:29) and
/// bits [20:2] become immhi (instruction bits 23:5).
static unsigned AdrImmBits(unsigned Value) {
  unsigned ImmLo = Value & 0x3;
  unsigned ImmHi = (Value & 0x1ffffc) >> 2;
  return (ImmHi << 5) | (ImmLo << 29);
}
137
// adjustFixupValue - range/alignment-check a fixup's resolved value and
// rewrite it into the bit layout the target instruction encoding expects.
//
// NOTE(review): this listing is a text extraction of a Doxygen page; the
// leading numbers are original source line numbers. Every
// `case AArch64::fixup_...:` label (hyperlinked in the HTML) was dropped, as
// was the start of the signature (original lines 138-139). Per the
// cross-reference index, the signature is:
//   static uint64_t adjustFixupValue(const MCFixup &Fixup,
//       const MCValue &Target, uint64_t Value, MCContext &Ctx,
//       const Triple &TheTriple, bool IsResolved)
// Wherever the embedded numbering skips, a line is missing — confirm against
// upstream LLVM before editing.
140 const Triple &TheTriple, bool IsResolved) {
141 int64_t SignedValue = static_cast<int64_t>(Value);
142 switch (Fixup.getKind()) {
143 default:
144 llvm_unreachable("Unknown fixup kind!");
 // (dropped case label: pcrel ADR, original line 145)
146 if (!isInt<21>(SignedValue))
147 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
148 return AdrImmBits(Value & 0x1fffffULL);
 // (dropped case label: pcrel ADRP, original line 149)
150 assert(!IsResolved);
151 if (TheTriple.isOSBinFormatCOFF()) {
152 if (!isInt<21>(SignedValue))
153 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
154 return AdrImmBits(Value & 0x1fffffULL);
155 }
156 return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
159 // Signed 19-bit immediate which gets multiplied by 4
160 if (!isInt<21>(SignedValue))
161 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
162 if (Value & 0x3)
163 Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
164 // Low two bits are not encoded.
165 return (Value >> 2) & 0x7ffff;
168 if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
169 Value &= 0xfff;
170 // Unsigned 12-bit immediate
171 if (!isUInt<12>(Value))
172 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
173 return Value;
175 if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
176 Value &= 0xfff;
177 // Unsigned 12-bit immediate which gets multiplied by 2
178 if (!isUInt<13>(Value))
179 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
180 if (Value & 0x1)
181 Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
182 return Value >> 1;
184 if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
185 Value &= 0xfff;
186 // Unsigned 12-bit immediate which gets multiplied by 4
187 if (!isUInt<14>(Value))
188 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
189 if (Value & 0x3)
190 Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
191 return Value >> 2;
193 if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
194 Value &= 0xfff;
195 // Unsigned 12-bit immediate which gets multiplied by 8
196 if (!isUInt<15>(Value))
197 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
198 if (Value & 0x7)
199 Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
200 return Value >> 3;
202 if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
203 Value &= 0xfff;
204 // Unsigned 12-bit immediate which gets multiplied by 16
205 if (!isUInt<16>(Value))
206 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
207 if (Value & 0xf)
208 Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
209 return Value >> 4;
 // (dropped case label: movw, original line 210)
211 AArch64::Specifier RefKind =
212 static_cast<AArch64::Specifier>(Target.getSpecifier());
213 if (AArch64::getSymbolLoc(RefKind) != AArch64::S_ABS &&
 // (dropped continuation of the condition, original line 214)
215 if (!RefKind) {
216 // The fixup is an expression
217 if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
218 Ctx.reportError(Fixup.getLoc(),
219 "fixup value out of range [-0xFFFF, 0xFFFF]");
220
221 // Invert the negative immediate because it will feed into a MOVN.
222 if (SignedValue < 0)
223 SignedValue = ~SignedValue;
224 Value = static_cast<uint64_t>(SignedValue);
225 } else
226 // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
227 // ever be resolved in the assembler.
228 Ctx.reportError(Fixup.getLoc(),
229 "relocation for a thread-local variable points to an "
230 "absolute symbol");
231 return Value;
232 }
233
234 if (!IsResolved) {
235 // FIXME: Figure out when this can actually happen, and verify our
236 // behavior.
237 Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
238 "implemented");
239 return Value;
240 }
241
242 if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
243 switch (AArch64::getAddressFrag(RefKind)) {
244 case AArch64::S_G0:
245 break;
246 case AArch64::S_G1:
247 SignedValue = SignedValue >> 16;
248 break;
249 case AArch64::S_G2:
250 SignedValue = SignedValue >> 32;
251 break;
252 case AArch64::S_G3:
253 SignedValue = SignedValue >> 48;
254 break;
255 default:
256 llvm_unreachable("Variant kind doesn't correspond to fixup");
257 }
258
259 } else {
260 switch (AArch64::getAddressFrag(RefKind)) {
261 case AArch64::S_G0:
262 break;
263 case AArch64::S_G1:
264 Value = Value >> 16;
265 break;
266 case AArch64::S_G2:
267 Value = Value >> 32;
268 break;
269 case AArch64::S_G3:
270 Value = Value >> 48;
271 break;
272 default:
273 llvm_unreachable("Variant kind doesn't correspond to fixup");
274 }
275 }
276
277 if (RefKind & AArch64::S_NC) {
278 Value &= 0xFFFF;
279 } else if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
280 if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
281 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
282
283 // Invert the negative immediate because it will feed into a MOVN.
284 if (SignedValue < 0)
285 SignedValue = ~SignedValue;
286 Value = static_cast<uint64_t>(SignedValue);
287 } else if (Value > 0xFFFF) {
288 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
289 }
290 return Value;
291 }
293 // Signed 11-bit(9bits + 2 shifts) label
294 if (!isInt<11>(SignedValue))
295 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
296 // Low two bits are not encoded (4-byte alignment assumed).
297 if (Value & 0b11)
298 Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
299 return (Value >> 2) & 0x1ff;
301 // Signed 16-bit immediate
302 if (!isInt<16>(SignedValue))
303 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
304 // Low two bits are not encoded (4-byte alignment assumed).
305 if (Value & 0x3)
306 Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
307 return (Value >> 2) & 0x3fff;
309 // Unsigned PC-relative offset, so invert the negative immediate.
310 SignedValue = -SignedValue;
311 Value = static_cast<uint64_t>(SignedValue);
312 // Check valid 18-bit unsigned range.
313 if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
314 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
315 // Low two bits are not encoded (4-byte alignment assumed).
316 if (Value & 0b11)
317 Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
318 return (Value >> 2) & 0xffff;
321 if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
322 // MSVC link.exe and lld do not support this relocation type
323 // with a non-zero offset
324 Ctx.reportError(Fixup.getLoc(),
325 "cannot perform a PC-relative fixup with a non-zero "
326 "symbol offset");
327 }
328 // Signed 28-bit immediate
329 if (!isInt<28>(SignedValue))
330 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
331 // Low two bits are not encoded (4-byte alignment assumed).
332 if (Value & 0x3)
333 Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
334 return (Value >> 2) & 0x3ffffff;
335 case FK_Data_1:
336 case FK_Data_2:
337 case FK_Data_4:
338 case FK_Data_8:
339 case FK_SecRel_2:
340 case FK_SecRel_4:
 // Plain data fixups are emitted as-is; no range check or re-encoding.
341 return Value;
342 }
343}
344
345std::optional<MCFixupKind>
346AArch64AsmBackend::getFixupKind(StringRef Name) const {
347 if (!TheTriple.isOSBinFormatELF())
348 return std::nullopt;
349
350 unsigned Type = llvm::StringSwitch<unsigned>(Name)
351#define ELF_RELOC(X, Y) .Case(#X, Y)
352#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
353#undef ELF_RELOC
354 .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
355 .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
356 .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
357 .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
358 .Default(-1u);
359 if (Type == -1u)
360 return std::nullopt;
361 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
362}
363
364/// getFixupKindContainereSizeInBytes - The number of bytes of the
365/// container involved in big endian or 0 if the item is little endian
// ("Containere" is a historical typo preserved in the method name.)
366unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
 // Little-endian targets never need byte-swapped application.
367 if (Endian == llvm::endianness::little)
368 return 0;
369
370 switch (Kind) {
371 default:
372 llvm_unreachable("Unknown fixup kind!");
373
 // Data fixups live in a container of their own size on big-endian targets.
374 case FK_Data_1:
375 return 1;
376 case FK_Data_2:
377 return 2;
378 case FK_Data_4:
379 return 4;
380 case FK_Data_8:
381 return 8;
382
 // NOTE(review): the `case AArch64::fixup_...:` labels (original lines
 // 383-398) were dropped by the extraction; this fall-through group covers
 // the instruction fixup kinds. Confirm against upstream LLVM.
399 // Instructions are always little endian
400 return 0;
401 }
402}
403
// NOTE(review): the enclosing function's signature (original line 404) and
// its return statement (original line 417) were dropped by the extraction.
// Per the cross-reference index this is
//   static bool shouldForceRelocation(const MCFixup &Fixup)
// — confirm against upstream LLVM.
405 // The ADRP instruction adds some multiple of 0x1000 to the current PC &
406 // ~0xfff. This means that the required offset to reach a symbol can vary by
407 // up to one step depending on where the ADRP is in memory. For example:
408 //
409 // ADRP x0, there
410 // there:
411 //
412 // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
413 // we'll need that as an offset. At any other address "there" will be in the
414 // same page as the ADRP and the instruction should encode 0x0. Assuming the
415 // section isn't 0x1000-aligned, we therefore need to delegate this decision
416 // to the linker -- a relocation!
418}
419
// applyFixup - adjust the fixup value for the target encoding and OR its
// bytes into the fragment data at the fixup offset. For MOVW-family fixups
// it also flips instruction bit 30 to select MOVZ vs MOVN.
//
// NOTE(review): this listing is a text extraction; wherever the embedded
// numbering skips, a hyperlinked line was dropped — original line 423
// (apparently `if (shouldForceRelocation(Fixup))`, guarding the
// IsResolved=false below) and line 432 (apparently the definition of SymLoc
// from RefKind). Confirm against upstream LLVM before editing.
420void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
421 const MCValue &Target, uint8_t *Data,
422 uint64_t Value, bool IsResolved) {
424 IsResolved = false;
425 maybeAddReloc(F, Fixup, Target, Value, IsResolved);
426 MCFixupKind Kind = Fixup.getKind();
 // Raw relocation kinds are emitted as relocations only; nothing to patch.
427 if (mc::isRelocation(Kind))
428 return;
429
 // Pointer-auth (AUTH/AUTHADDR) 8-byte data fixups on ELF encode the
 // discriminator, key and address-diversity bit into the value itself.
430 if (Fixup.getKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
431 auto RefKind = static_cast<AArch64::Specifier>(Target.getSpecifier());
433 if (SymLoc == AArch64::S_AUTH || SymLoc == AArch64::S_AUTHADDR) {
434 const auto *Expr = dyn_cast<AArch64AuthMCExpr>(Fixup.getValue());
435 if (!Expr) {
436 getContext().reportError(Fixup.getValue()->getLoc(),
437 "expected relocatable expression");
438 return;
439 }
440 assert(Value == 0);
441 Value = (uint64_t(Expr->getDiscriminator()) << 32) |
442 (uint64_t(Expr->getKey()) << 60) |
443 (uint64_t(Expr->hasAddressDiversity()) << 63);
444 }
445 }
446
447 if (!Value)
448 return; // Doesn't change encoding.
449 unsigned NumBytes = getFixupKindNumBytes(Kind);
450 MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
451 MCContext &Ctx = getContext();
 // Preserved before adjustment: sign decides MOVZ/MOVN selection below.
452 int64_t SignedValue = static_cast<int64_t>(Value);
453 // Apply any target-specific value adjustments.
454 Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);
455
456 // Shift the value into position.
457 Value <<= Info.TargetOffset;
458
459 assert(Fixup.getOffset() + NumBytes <= F.getSize() &&
460 "Invalid fixup offset!");
461
462 // Used to point to big endian bytes.
463 unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());
464
465 // For each byte of the fragment that the fixup touches, mask in the
466 // bits from the fixup value.
467 if (FulleSizeInBytes == 0) {
468 // Handle as little-endian
469 for (unsigned i = 0; i != NumBytes; ++i) {
470 Data[i] |= uint8_t((Value >> (i * 8)) & 0xff);
471 }
472 } else {
473 // Handle as big-endian
474 assert(Fixup.getOffset() + FulleSizeInBytes <= F.getSize() &&
475 "Invalid fixup size!");
476 assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
477 for (unsigned i = 0; i != NumBytes; ++i) {
478 unsigned Idx = FulleSizeInBytes - 1 - i;
479 Data[Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
480 }
481 }
482
483 // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
484 // handle this more cleanly. This may affect the output of -show-mc-encoding.
485 AArch64::Specifier RefKind =
486 static_cast<AArch64::Specifier>(Target.getSpecifier());
487 if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS ||
488 (!RefKind && Fixup.getKind() == AArch64::fixup_aarch64_movw)) {
489 // If the immediate is negative, generate MOVN else MOVZ.
490 // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
491 if (SignedValue < 0)
492 Data[3] &= ~(1 << 6);
493 else
494 Data[3] |= (1 << 6);
495 }
496}
497
498bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
499 const MCSubtargetInfo *STI) const {
500 // If the count is not 4-byte aligned, we must be writing data into the text
501 // section (otherwise we have unaligned instructions, and thus have far
502 // bigger problems), so just write zeros instead.
503 OS.write_zeros(Count % 4);
504
505 // We are properly aligned, so write NOPs as requested.
506 Count /= 4;
507 for (uint64_t i = 0; i != Count; ++i)
508 OS.write("\x1f\x20\x03\xd5", 4);
509 return true;
510}
511
512namespace {
513
514namespace CU {
515
516/// Compact unwind encoding values.
// NOTE(review): the enum declaration line itself (original line 517,
// apparently `enum CompactUnwindEncodings {` per the cross-reference index)
// was dropped by the extraction — confirm against upstream LLVM.
518 /// A "frameless" leaf function, where no non-volatile registers are
519 /// saved. The return remains in LR throughout the function.
520 UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,
521
522 /// No compact unwind encoding available. Instead the low 23-bits of
523 /// the compact unwind encoding is the offset of the DWARF FDE in the
524 /// __eh_frame section. This mode is never used in object files. It is only
525 /// generated by the linker in final linked images, which have only DWARF info
526 /// for a function.
527 UNWIND_ARM64_MODE_DWARF = 0x03000000,
528
529 /// This is a standard arm64 prologue where FP/LR are immediately
530 /// pushed on the stack, then SP is copied to FP. If there are any
531 /// non-volatile register saved, they are copied into the stack frame in pairs
532 /// in a contiguous range right below the saved FP/LR pair. Any subset of the
533 /// five X pairs and four D pairs can be saved, but the memory layout must be
534 /// in register number order.
535 UNWIND_ARM64_MODE_FRAME = 0x04000000,
536
537 /// Frame register pair encodings.
538 UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
539 UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
540 UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
541 UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
542 UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
543 UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
544 UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
545 UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
546 UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
547};
548
549} // end CU namespace
550
551// FIXME: This should be in a separate file.
// DarwinAArch64AsmBackend - Mach-O flavor of the backend. Besides creating
// the Mach-O object writer, it derives a compact unwind encoding from a
// function's CFI directives, falling back to UNWIND_ARM64_MODE_DWARF for
// anything the compact format cannot express.
//
// NOTE(review): this listing is a text extraction; wherever the embedded
// line numbering skips (e.g. 583, 587-588, 602, 618-619, 621, 632, 642,
// 648, 660), a hyperlinked line — typically a `case MCCFIInstruction::...:`
// label or an operation-check condition — was dropped. Confirm against
// upstream LLVM before editing.
552class DarwinAArch64AsmBackend : public AArch64AsmBackend {
553 const MCRegisterInfo &MRI;
554
555 /// Encode compact unwind stack adjustment for frameless functions.
556 /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
557 /// The stack size always needs to be 16 byte aligned.
558 uint32_t encodeStackAdjustment(uint32_t StackSize) const {
559 return (StackSize / 16) << 12;
560 }
561
562public:
563 DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
564 const MCRegisterInfo &MRI)
565 : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}
566
567 std::unique_ptr<MCObjectTargetWriter>
568 createObjectTargetWriter() const override {
569 uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
570 uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
571 return createAArch64MachObjectWriter(CPUType, CPUSubType,
572 TheTriple.isArch32Bit());
573 }
574
575 /// Generate the compact unwind encoding from the CFI directives.
576 uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
577 const MCContext *Ctxt) const override {
578 // MTE-tagged frames must use DWARF unwinding because compact unwind
579 // doesn't handle MTE tags
580 if (FI->IsMTETaggedFrame)
581 return CU::UNWIND_ARM64_MODE_DWARF;
582
 // NOTE(review): the definition of Instrs (original line 583, presumably
 // from FI->Instructions) was dropped here.
584 if (Instrs.empty())
585 return CU::UNWIND_ARM64_MODE_FRAMELESS;
586 if (!isDarwinCanonicalPersonality(FI->Personality) &&
588 return CU::UNWIND_ARM64_MODE_DWARF;
589
590 bool HasFP = false;
591 uint64_t StackSize = 0;
592
593 uint64_t CompactUnwindEncoding = 0;
594 int64_t CurOffset = 0;
595 for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
596 const MCCFIInstruction &Inst = Instrs[i];
597
598 switch (Inst.getOperation()) {
599 default:
600 // Cannot handle this directive: bail out.
601 return CU::UNWIND_ARM64_MODE_DWARF;
 // (dropped case label, original line 602 — the def-cfa/frame case)
603 // Defines a frame pointer.
604 MCRegister XReg =
605 getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));
606
607 // Other CFA registers than FP are not supported by compact unwind.
608 // Fallback on DWARF.
609 // FIXME: When opt-remarks are supported in MC, add a remark to notify
610 // the user.
611 if (XReg != AArch64::FP)
612 return CU::UNWIND_ARM64_MODE_DWARF;
613
614 if (i + 2 >= e)
615 return CU::UNWIND_ARM64_MODE_DWARF;
616
617 const MCCFIInstruction &LRPush = Instrs[++i];
 // (dropped operation-check condition, original line 618)
619 return CU::UNWIND_ARM64_MODE_DWARF;
620 const MCCFIInstruction &FPPush = Instrs[++i];
 // (dropped operation-check condition, original line 621)
622 return CU::UNWIND_ARM64_MODE_DWARF;
623
624 if (FPPush.getOffset() + 8 != LRPush.getOffset())
625 return CU::UNWIND_ARM64_MODE_DWARF;
626 CurOffset = FPPush.getOffset();
627
628 MCRegister LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
629 MCRegister FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);
630
631 LRReg = getXRegFromWReg(LRReg);
 // (dropped line, original line 632 — presumably FPReg conversion)
633
634 if (LRReg != AArch64::LR || FPReg != AArch64::FP)
635 return CU::UNWIND_ARM64_MODE_DWARF;
636
637 // Indicate that the function has a frame.
638 CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
639 HasFP = true;
640 break;
641 }
 // (dropped case label, original line 642 — the def-cfa-offset case)
643 if (StackSize != 0)
644 return CU::UNWIND_ARM64_MODE_DWARF;
645 StackSize = std::abs(Inst.getOffset());
646 break;
647 }
 // (dropped case label, original line 648 — the register-offset case)
649 // Registers are saved in pairs. We expect there to be two consecutive
650 // `.cfi_offset' instructions with the appropriate registers specified.
651 MCRegister Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
652 if (i + 1 == e)
653 return CU::UNWIND_ARM64_MODE_DWARF;
654
655 if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
656 return CU::UNWIND_ARM64_MODE_DWARF;
657 CurOffset = Inst.getOffset();
658
659 const MCCFIInstruction &Inst2 = Instrs[++i];
 // (dropped operation-check condition, original line 660)
661 return CU::UNWIND_ARM64_MODE_DWARF;
662 MCRegister Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);
663
664 if (Inst2.getOffset() != CurOffset - 8)
665 return CU::UNWIND_ARM64_MODE_DWARF;
666 CurOffset = Inst2.getOffset();
667
668 // N.B. The encodings must be in register number order, and the X
669 // registers before the D registers.
670
671 // X19/X20 pair = 0x00000001,
672 // X21/X22 pair = 0x00000002,
673 // X23/X24 pair = 0x00000004,
674 // X25/X26 pair = 0x00000008,
675 // X27/X28 pair = 0x00000010
676 Reg1 = getXRegFromWReg(Reg1);
677 Reg2 = getXRegFromWReg(Reg2);
678
679 if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
680 (CompactUnwindEncoding & 0xF1E) == 0)
681 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
682 else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
683 (CompactUnwindEncoding & 0xF1C) == 0)
684 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
685 else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
686 (CompactUnwindEncoding & 0xF18) == 0)
687 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
688 else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
689 (CompactUnwindEncoding & 0xF10) == 0)
690 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
691 else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
692 (CompactUnwindEncoding & 0xF00) == 0)
693 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
694 else {
695 Reg1 = getDRegFromBReg(Reg1);
696 Reg2 = getDRegFromBReg(Reg2);
697
698 // D8/D9 pair = 0x00000100,
699 // D10/D11 pair = 0x00000200,
700 // D12/D13 pair = 0x00000400,
701 // D14/D15 pair = 0x00000800
702 if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
703 (CompactUnwindEncoding & 0xE00) == 0)
704 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
705 else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
706 (CompactUnwindEncoding & 0xC00) == 0)
707 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
708 else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
709 (CompactUnwindEncoding & 0x800) == 0)
710 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
711 else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
712 CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
713 else
714 // A pair was pushed which we cannot handle.
715 return CU::UNWIND_ARM64_MODE_DWARF;
716 }
717
718 break;
719 }
720 }
721 }
722
723 if (!HasFP) {
724 // With compact unwind info we can only represent stack adjustments of up
725 // to 65520 bytes.
726 if (StackSize > 65520)
727 return CU::UNWIND_ARM64_MODE_DWARF;
728
729 CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
730 CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
731 }
732
733 return CompactUnwindEncoding;
734 }
735};
736
737} // end anonymous namespace
738
739namespace {
740
741class ELFAArch64AsmBackend : public AArch64AsmBackend {
742public:
743 uint8_t OSABI;
744 bool IsILP32;
745
746 ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
747 bool IsLittleEndian, bool IsILP32)
748 : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
749 IsILP32(IsILP32) {}
750
751 std::unique_ptr<MCObjectTargetWriter>
752 createObjectTargetWriter() const override {
753 return createAArch64ELFObjectWriter(OSABI, IsILP32);
754 }
755};
756
757}
758
759namespace {
760class COFFAArch64AsmBackend : public AArch64AsmBackend {
761public:
762 COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
763 : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}
764
765 std::unique_ptr<MCObjectTargetWriter>
766 createObjectTargetWriter() const override {
767 return createAArch64WinCOFFObjectWriter(TheTriple);
768 }
769};
770}
771
// createAArch64leAsmBackend - factory for the little-endian backend; picks
// the Mach-O, COFF or ELF subclass from the subtarget's triple.
//
// NOTE(review): the start of the signature (original line 772) and the OSABI
// computation (original line 786) were dropped by the extraction. Per the
// cross-reference index the signature is:
//   MCAsmBackend *createAArch64leAsmBackend(const Target &T,
//       const MCSubtargetInfo &STI, const MCRegisterInfo &MRI,
//       const MCTargetOptions &Options)
// Confirm against upstream LLVM.
773 const MCSubtargetInfo &STI,
774 const MCRegisterInfo &MRI,
775 const MCTargetOptions &Options) {
776 const Triple &TheTriple = STI.getTargetTriple();
777 if (TheTriple.isOSBinFormatMachO()) {
778 return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
779 }
780
781 if (TheTriple.isOSBinFormatCOFF())
782 return new COFFAArch64AsmBackend(T, TheTriple);
783
784 assert(TheTriple.isOSBinFormatELF() && "Invalid target");
785
 // NOTE(review): the line defining OSABI (original line 786) was dropped.
787 bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
788 return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
789 IsILP32);
790}
791
// createAArch64beAsmBackend - factory for the big-endian backend; only ELF
// output is supported for big-endian AArch64.
//
// NOTE(review): the start of the signature (original line 792) and the OSABI
// computation (original line 799) were dropped by the extraction. Per the
// cross-reference index the signature is:
//   MCAsmBackend *createAArch64beAsmBackend(const Target &T,
//       const MCSubtargetInfo &STI, const MCRegisterInfo &MRI,
//       const MCTargetOptions &Options)
// Confirm against upstream LLVM.
793 const MCSubtargetInfo &STI,
794 const MCRegisterInfo &MRI,
795 const MCTargetOptions &Options) {
796 const Triple &TheTriple = STI.getTargetTriple();
797 assert(TheTriple.isOSBinFormatELF() &&
798 "Big endian is only supported for ELF targets!");
 // NOTE(review): the line defining OSABI (original line 799) was dropped.
800 bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
801 return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
802 IsILP32);
803}
static unsigned AdrImmBits(unsigned Value)
static unsigned getFixupKindNumBytes(unsigned Kind)
The number of bytes the fixup may change.
static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target, uint64_t Value, MCContext &Ctx, const Triple &TheTriple, bool IsResolved)
static bool shouldForceRelocation(const MCFixup &Fixup)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
basic Basic Alias true
static LVOptions Options
Definition LVOptions.cpp:25
#define F(x, y, z)
Definition MD5.cpp:54
#define T
PowerPC TLS Dynamic Call Fixup
static constexpr MCPhysReg FPReg
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
Generic interface to target specific assembler backends.
virtual MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
unsigned getRegister() const
Definition MCDwarf.h:717
OpType getOperation() const
Definition MCDwarf.h:714
int64_t getOffset() const
Definition MCDwarf.h:736
Context object for machine code objects.
Definition MCContext.h:83
LLVM_ABI bool emitCompactUnwindNonCanonical() const
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition MCFixup.h:61
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
std::optional< MCRegister > getLLVMRegNum(uint64_t RegNum, bool isEH) const
Map a dwarf register back to a target register.
Generic base class for all target subtargets.
const Triple & getTargetTriple() const
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:816
OSType getOS() const
Get the parsed operating system type of this triple.
Definition Triple.h:429
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition Triple.h:808
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
Definition Triple.h:437
LLVM_ABI bool isArch32Bit() const
Test whether the architecture is 32-bit.
Definition Triple.cpp:1831
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition Triple.h:803
LLVM Value Representation.
Definition Value.h:75
raw_ostream & write_zeros(unsigned NumZeros)
write_zeros - Insert 'NumZeros' nulls.
raw_ostream & write(unsigned char C)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CompactUnwindEncodings
Compact unwind encoding values.
Specifier getSymbolLoc(Specifier S)
Specifier getAddressFrag(Specifier S)
LLVM_ABI Expected< uint32_t > getCPUSubType(const Triple &T)
Definition MachO.cpp:101
LLVM_ABI Expected< uint32_t > getCPUType(const Triple &T)
Definition MachO.cpp:81
VE::Fixups getFixupKind(uint8_t S)
bool isRelocation(MCFixupKind FixupKind)
Definition MCFixup.h:130
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
std::unique_ptr< MCObjectTargetWriter > createAArch64WinCOFFObjectWriter(const Triple &TheTriple)
uint16_t MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition MCFixup.h:22
MCAsmBackend * createAArch64leAsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
@ FirstTargetFixupKind
Definition MCFixup.h:44
@ FK_SecRel_2
A two-byte section relative fixup.
Definition MCFixup.h:40
@ FirstLiteralRelocationKind
Definition MCFixup.h:29
@ FK_Data_8
A eight-byte fixup.
Definition MCFixup.h:37
@ FK_Data_1
A one-byte fixup.
Definition MCFixup.h:34
@ FK_Data_4
A four-byte fixup.
Definition MCFixup.h:36
@ FK_SecRel_4
A four-byte section relative fixup.
Definition MCFixup.h:41
@ FK_Data_2
A two-byte fixup.
Definition MCFixup.h:35
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition Error.h:769
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:189
static MCRegister getXRegFromWReg(MCRegister Reg)
ArrayRef(const T &OneElt) -> ArrayRef< T >
std::unique_ptr< MCObjectTargetWriter > createAArch64MachObjectWriter(uint32_t CPUType, uint32_t CPUSubtype, bool IsILP32)
MCAsmBackend * createAArch64beAsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
static MCRegister getDRegFromBReg(MCRegister Reg)
endianness
Definition bit.h:71
std::unique_ptr< MCObjectTargetWriter > createAArch64ELFObjectWriter(uint8_t OSABI, bool IsILP32)
const MCSymbol * Personality
Definition MCDwarf.h:767
std::vector< MCCFIInstruction > Instructions
Definition MCDwarf.h:769