LLVM 23.0.0git
AArch64AddressingModes.h
Go to the documentation of this file.
1//===- AArch64AddressingModes.h - AArch64 Addressing Modes ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the AArch64 addressing mode implementation stuff.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
14#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
15
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/bit.h"
21#include <cassert>
22
23namespace llvm {
24
25/// AArch64_AM - AArch64 Addressing Mode Stuff
26namespace AArch64_AM {
27
28//===----------------------------------------------------------------------===//
29// Shifts
30//
31
50
51/// isSignExtendShiftType - Returns true if \p Type is sign extending.
53 switch (Type) {
58 return true;
59 default:
60 return false;
61 }
62}
63
64/// getShiftName - Get the string encoding for the shift type.
65static inline const char *getShiftExtendName(AArch64_AM::ShiftExtendType ST) {
66 switch (ST) {
67 default: llvm_unreachable("unhandled shift type!");
68 case AArch64_AM::LSL: return "lsl";
69 case AArch64_AM::LSR: return "lsr";
70 case AArch64_AM::ASR: return "asr";
71 case AArch64_AM::ROR: return "ror";
72 case AArch64_AM::MSL: return "msl";
73 case AArch64_AM::UXTB: return "uxtb";
74 case AArch64_AM::UXTH: return "uxth";
75 case AArch64_AM::UXTW: return "uxtw";
76 case AArch64_AM::UXTX: return "uxtx";
77 case AArch64_AM::SXTB: return "sxtb";
78 case AArch64_AM::SXTH: return "sxth";
79 case AArch64_AM::SXTW: return "sxtw";
80 case AArch64_AM::SXTX: return "sxtx";
81 }
82 return nullptr;
83}
84
85/// getShiftType - Extract the shift type.
86static inline AArch64_AM::ShiftExtendType getShiftType(unsigned Imm) {
87 switch ((Imm >> 6) & 0x7) {
88 default: return AArch64_AM::InvalidShiftExtend;
89 case 0: return AArch64_AM::LSL;
90 case 1: return AArch64_AM::LSR;
91 case 2: return AArch64_AM::ASR;
92 case 3: return AArch64_AM::ROR;
93 case 4: return AArch64_AM::MSL;
94 }
95}
96
/// getShiftValue - Extract the 6-bit shift amount (bits {5-0}) from an
/// encoded shifter immediate.
static inline unsigned getShiftValue(unsigned Imm) {
  return Imm % 64;
}
101
102/// getShifterImm - Encode the shift type and amount:
103/// imm: 6-bit shift amount
104/// shifter: 000 ==> lsl
105/// 001 ==> lsr
106/// 010 ==> asr
107/// 011 ==> ror
108/// 100 ==> msl
109/// {8-6} = shifter
110/// {5-0} = imm
112 unsigned Imm) {
113 assert((Imm & 0x3f) == Imm && "Illegal shifted immediate value!");
114 unsigned STEnc = 0;
115 switch (ST) {
116 default: llvm_unreachable("Invalid shift requested");
117 case AArch64_AM::LSL: STEnc = 0; break;
118 case AArch64_AM::LSR: STEnc = 1; break;
119 case AArch64_AM::ASR: STEnc = 2; break;
120 case AArch64_AM::ROR: STEnc = 3; break;
121 case AArch64_AM::MSL: STEnc = 4; break;
122 }
123 return (STEnc << 6) | (Imm & 0x3f);
124}
125
126//===----------------------------------------------------------------------===//
127// Extends
128//
129
/// getArithShiftValue - Extract the 3-bit shift amount (bits {2-0}) from an
/// encoded arithmetic extend operand.
static inline unsigned getArithShiftValue(unsigned Imm) {
  return Imm % 8;
}
134
135/// getExtendType - Extract the extend type for operands of arithmetic ops.
136static inline AArch64_AM::ShiftExtendType getExtendType(unsigned Imm) {
137 assert((Imm & 0x7) == Imm && "invalid immediate!");
138 switch (Imm) {
139 default: llvm_unreachable("Compiler bug!");
140 case 0: return AArch64_AM::UXTB;
141 case 1: return AArch64_AM::UXTH;
142 case 2: return AArch64_AM::UXTW;
143 case 3: return AArch64_AM::UXTX;
144 case 4: return AArch64_AM::SXTB;
145 case 5: return AArch64_AM::SXTH;
146 case 6: return AArch64_AM::SXTW;
147 case 7: return AArch64_AM::SXTX;
148 }
149}
150
152 return getExtendType((Imm >> 3) & 0x7);
153}
154
155/// Mapping from extend bits to required operation:
156/// shifter: 000 ==> uxtb
157/// 001 ==> uxth
158/// 010 ==> uxtw
159/// 011 ==> uxtx
160/// 100 ==> sxtb
161/// 101 ==> sxth
162/// 110 ==> sxtw
163/// 111 ==> sxtx
165 switch (ET) {
166 default: llvm_unreachable("Invalid extend type requested");
167 case AArch64_AM::UXTB: return 0; break;
168 case AArch64_AM::UXTH: return 1; break;
169 case AArch64_AM::UXTW: return 2; break;
170 case AArch64_AM::UXTX: return 3; break;
171 case AArch64_AM::SXTB: return 4; break;
172 case AArch64_AM::SXTH: return 5; break;
173 case AArch64_AM::SXTW: return 6; break;
174 case AArch64_AM::SXTX: return 7; break;
175 }
176}
177
178/// getArithExtendImm - Encode the extend type and shift amount for an
179/// arithmetic instruction:
180/// imm: 3-bit extend amount
181/// {5-3} = shifter
182/// {2-0} = imm3
184 unsigned Imm) {
185 assert((Imm & 0x7) == Imm && "Illegal shifted immediate value!");
186 return (getExtendEncoding(ET) << 3) | (Imm & 0x7);
187}
188
/// getMemDoShift - Extract the "do shift" flag (bit {0}) from a load/store
/// extend encoding; when set, the offset is scaled by the access size.
static inline bool getMemDoShift(unsigned Imm) {
  return Imm & 0x1;
}
194
195/// getExtendType - Extract the extend type for the offset operand of
196/// loads/stores.
198 return getExtendType((Imm >> 1) & 0x7);
199}
200
201/// getExtendImm - Encode the extend type and amount for a load/store inst:
202/// doshift: should the offset be scaled by the access size
203/// shifter: 000 ==> uxtb
204/// 001 ==> uxth
205/// 010 ==> uxtw
206/// 011 ==> uxtx
207/// 100 ==> sxtb
208/// 101 ==> sxth
209/// 110 ==> sxtw
210/// 111 ==> sxtx
211/// {3-1} = shifter
212/// {0} = doshift
214 bool DoShift) {
215 return (getExtendEncoding(ET) << 1) | unsigned(DoShift);
216}
217
/// Rotate a `size`-bit value right by one bit position; the bit shifted out
/// at the bottom re-enters at bit (size-1).
static inline uint64_t ror(uint64_t elt, unsigned size) {
  return (elt >> 1) | ((elt & 1) << (size - 1));
}
221
222/// processLogicalImmediate - Determine if an immediate value can be encoded
223/// as the immediate operand of a logical instruction for the given register
224/// size. If so, return true with "encoding" set to the encoded value in
225/// the form N:immr:imms.
226static inline bool processLogicalImmediate(uint64_t Imm, unsigned RegSize,
227 uint64_t &Encoding) {
228 if (Imm == 0ULL || Imm == ~0ULL ||
229 (RegSize != 64 &&
230 (Imm >> RegSize != 0 || Imm == (~0ULL >> (64 - RegSize)))))
231 return false;
232
233 // First, determine the element size.
234 unsigned Size = RegSize;
235
236 do {
237 Size /= 2;
238 uint64_t Mask = (1ULL << Size) - 1;
239
240 if ((Imm & Mask) != ((Imm >> Size) & Mask)) {
241 Size *= 2;
242 break;
243 }
244 } while (Size > 2);
245
246 // Second, determine the rotation to make the element be: 0^m 1^n.
247 uint32_t CTO, I;
248 uint64_t Mask = ((uint64_t)-1LL) >> (64 - Size);
249 Imm &= Mask;
250
251 if (isShiftedMask_64(Imm)) {
252 I = llvm::countr_zero(Imm);
253 assert(I < 64 && "undefined behavior");
254 CTO = llvm::countr_one(Imm >> I);
255 } else {
256 Imm |= ~Mask;
257 if (!isShiftedMask_64(~Imm))
258 return false;
259
260 unsigned CLO = llvm::countl_one(Imm);
261 I = 64 - CLO;
262 CTO = CLO + llvm::countr_one(Imm) - (64 - Size);
263 }
264
265 // Encode in Immr the number of RORs it would take to get *from* 0^m 1^n
266 // to our target value, where I is the number of RORs to go the opposite
267 // direction.
268 assert(Size > I && "I should be smaller than element size");
269 unsigned Immr = (Size - I) & (Size - 1);
270
271 // If size has a 1 in the n'th bit, create a value that has zeroes in
272 // bits [0, n] and ones above that.
273 uint64_t NImms = ~(Size-1) << 1;
274
275 // Or the CTO value into the low bits, which must be below the Nth bit
276 // bit mentioned above.
277 NImms |= (CTO-1);
278
279 // Extract the seventh bit and toggle it to create the N field.
280 unsigned N = ((NImms >> 6) & 1) ^ 1;
281
282 Encoding = (N << 12) | (Immr << 6) | (NImms & 0x3f);
283 return true;
284}
285
286/// isLogicalImmediate - Return true if the immediate is valid for a logical
287/// immediate instruction of the given register size. Return false otherwise.
288static inline bool isLogicalImmediate(uint64_t imm, unsigned regSize) {
289 uint64_t encoding;
290 return processLogicalImmediate(imm, regSize, encoding);
291}
292
293/// encodeLogicalImmediate - Return the encoded immediate value for a logical
294/// immediate instruction of the given register size.
295static inline uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize) {
296 uint64_t encoding = 0;
297 bool res = processLogicalImmediate(imm, regSize, encoding);
298 assert(res && "invalid logical immediate");
299 (void)res;
300 return encoding;
301}
302
303/// decodeLogicalImmediate - Decode a logical immediate value in the form
304/// "N:immr:imms" (where the immr and imms fields are each 6 bits) into the
305/// integer value it represents with regSize bits.
306static inline uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize) {
307 // Extract the N, imms, and immr fields.
308 unsigned N = (val >> 12) & 1;
309 unsigned immr = (val >> 6) & 0x3f;
310 unsigned imms = val & 0x3f;
311
312 assert((regSize == 64 || N == 0) && "undefined logical immediate encoding");
313 int len = 31 - llvm::countl_zero((N << 6) | (~imms & 0x3f));
314 assert(len >= 0 && "undefined logical immediate encoding");
315 unsigned size = (1 << len);
316 unsigned R = immr & (size - 1);
317 unsigned S = imms & (size - 1);
318 assert(S != size - 1 && "undefined logical immediate encoding");
319 uint64_t pattern = (1ULL << (S + 1)) - 1;
320 for (unsigned i = 0; i < R; ++i)
321 pattern = ror(pattern, size);
322
323 // Replicate the pattern to fill the regSize.
324 while (size != regSize) {
325 pattern |= (pattern << size);
326 size *= 2;
327 }
328 return pattern;
329}
330
331/// isValidDecodeLogicalImmediate - Check to see if the logical immediate value
332/// in the form "N:immr:imms" (where the immr and imms fields are each 6 bits)
333/// is a valid encoding for an integer value with regSize bits.
335 unsigned regSize) {
336 // Extract the N and imms fields needed for checking.
337 unsigned N = (val >> 12) & 1;
338 unsigned imms = val & 0x3f;
339
340 if (regSize == 32 && N != 0) // undefined logical immediate encoding
341 return false;
342 int len = 31 - llvm::countl_zero((N << 6) | (~imms & 0x3f));
343 if (len < 0) // undefined logical immediate encoding
344 return false;
345 unsigned size = (1 << len);
346 unsigned S = imms & (size - 1);
347 if (S == size - 1) // undefined logical immediate encoding
348 return false;
349
350 return true;
351}
352
353//===----------------------------------------------------------------------===//
354// Floating-point Immediates
355//
356static inline float getFPImmFloat(unsigned Imm) {
357 // We expect an 8-bit binary encoding of a floating-point number here.
358
359 uint8_t Sign = (Imm >> 7) & 0x1;
360 uint8_t Exp = (Imm >> 4) & 0x7;
361 uint8_t Mantissa = Imm & 0xf;
362
363 // 8-bit FP IEEE Float Encoding
364 // abcd efgh aBbbbbbc defgh000 00000000 00000000
365 //
366 // where B = NOT(b);
367
368 uint32_t I = 0;
369 I |= Sign << 31;
370 I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30;
371 I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25;
372 I |= (Exp & 0x3) << 23;
373 I |= Mantissa << 19;
374 return bit_cast<float>(I);
375}
376
377/// getFP16Imm - Return an 8-bit floating-point version of the 16-bit
378/// floating-point value. If the value cannot be represented as an 8-bit
379/// floating-point value, then return -1.
380static inline int getFP16Imm(const APInt &Imm) {
381 uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
382 int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15; // -14 to 15
383 int32_t Mantissa = Imm.getZExtValue() & 0x3ff; // 10 bits
384
385 // We can handle 4 bits of mantissa.
386 // mantissa = (16+UInt(e:f:g:h))/16.
387 if (Mantissa & 0x3f)
388 return -1;
389 Mantissa >>= 6;
390
391 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
392 if (Exp < -3 || Exp > 4)
393 return -1;
394 Exp = ((Exp+3) & 0x7) ^ 4;
395
396 return ((int)Sign << 7) | (Exp << 4) | Mantissa;
397}
398
399static inline int getFP16Imm(const APFloat &FPImm) {
400 return getFP16Imm(FPImm.bitcastToAPInt());
401}
402
403/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
404/// floating-point value. If the value cannot be represented as an 8-bit
405/// floating-point value, then return -1.
406static inline int getFP32Imm(const APInt &Imm) {
407 uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
408 int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
409 int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits
410
411 // We can handle 4 bits of mantissa.
412 // mantissa = (16+UInt(e:f:g:h))/16.
413 if (Mantissa & 0x7ffff)
414 return -1;
415 Mantissa >>= 19;
416 if ((Mantissa & 0xf) != Mantissa)
417 return -1;
418
419 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
420 if (Exp < -3 || Exp > 4)
421 return -1;
422 Exp = ((Exp+3) & 0x7) ^ 4;
423
424 return ((int)Sign << 7) | (Exp << 4) | Mantissa;
425}
426
427static inline int getFP32Imm(const APFloat &FPImm) {
428 return getFP32Imm(FPImm.bitcastToAPInt());
429}
430
431/// getFP64Imm - Return an 8-bit floating-point version of the 64-bit
432/// floating-point value. If the value cannot be represented as an 8-bit
433/// floating-point value, then return -1.
434static inline int getFP64Imm(const APInt &Imm) {
435 uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
436 int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
437 uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL;
438
439 // We can handle 4 bits of mantissa.
440 // mantissa = (16+UInt(e:f:g:h))/16.
441 if (Mantissa & 0xffffffffffffULL)
442 return -1;
443 Mantissa >>= 48;
444 if ((Mantissa & 0xf) != Mantissa)
445 return -1;
446
447 // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
448 if (Exp < -3 || Exp > 4)
449 return -1;
450 Exp = ((Exp+3) & 0x7) ^ 4;
451
452 return ((int)Sign << 7) | (Exp << 4) | Mantissa;
453}
454
455static inline int getFP64Imm(const APFloat &FPImm) {
456 return getFP64Imm(FPImm.bitcastToAPInt());
457}
458
459//===--------------------------------------------------------------------===//
460// AdvSIMD Modified Immediates
461//===--------------------------------------------------------------------===//
462
// 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh
static inline bool isAdvSIMDModImmType1(uint64_t Imm) {
  // Both 32-bit halves must match and hold the byte in lane 0 only.
  const uint64_t Lo = Imm & 0xffffffffULL;
  return (Imm >> 32) == Lo && (Lo & 0xffffff00ULL) == 0;
}
468
470 return (Imm & 0xffULL);
471}
472
474 uint64_t EncVal = Imm;
475 return (EncVal << 32) | EncVal;
476}
477
// 0x00 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType2(uint64_t Imm) {
  // Both 32-bit halves must match and hold the byte in lane 1 only.
  const uint64_t Lo = Imm & 0xffffffffULL;
  return (Imm >> 32) == Lo && (Lo & 0xffff00ffULL) == 0;
}
483
485 return (Imm & 0xff00ULL) >> 8;
486}
487
489 uint64_t EncVal = Imm;
490 return (EncVal << 40) | (EncVal << 8);
491}
492
// 0x00 abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00
static inline bool isAdvSIMDModImmType3(uint64_t Imm) {
  // Both 32-bit halves must match and hold the byte in lane 2 only.
  const uint64_t Lo = Imm & 0xffffffffULL;
  return (Imm >> 32) == Lo && (Lo & 0xff00ffffULL) == 0;
}
498
500 return (Imm & 0xff0000ULL) >> 16;
501}
502
504 uint64_t EncVal = Imm;
505 return (EncVal << 48) | (EncVal << 16);
506}
507
// abcdefgh 0x00 0x00 0x00 abcdefgh 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType4(uint64_t Imm) {
  // Both 32-bit halves must match and hold the byte in lane 3 only.
  const uint64_t Lo = Imm & 0xffffffffULL;
  return (Imm >> 32) == Lo && (Lo & 0x00ffffffULL) == 0;
}
513
515 return (Imm & 0xff000000ULL) >> 24;
516}
517
519 uint64_t EncVal = Imm;
520 return (EncVal << 56) | (EncVal << 24);
521}
522
// 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh
static inline bool isAdvSIMDModImmType5(uint64_t Imm) {
  // Both 32-bit halves must match; within a half, the same byte sits in the
  // low byte of each 16-bit lane and the high bytes are zero.
  const uint64_t Lo = Imm & 0xffffffffULL;
  const uint64_t Byte = Lo & 0xffULL;
  return (Imm >> 32) == Lo && ((Lo >> 16) & 0xffULL) == Byte &&
         (Lo & 0xff00ff00ULL) == 0;
}
529
531 return (Imm & 0xffULL);
532}
533
535 uint64_t EncVal = Imm;
536 return (EncVal << 48) | (EncVal << 32) | (EncVal << 16) | EncVal;
537}
538
// abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00 abcdefgh 0x00
static inline bool isAdvSIMDModImmType6(uint64_t Imm) {
  // Both 32-bit halves must match; within a half, the same byte sits in the
  // high byte of each 16-bit lane and the low bytes are zero.
  const uint64_t Lo = Imm & 0xffffffffULL;
  const uint64_t Byte = (Lo >> 8) & 0xffULL;
  return (Imm >> 32) == Lo && ((Lo >> 24) & 0xffULL) == Byte &&
         (Lo & 0x00ff00ffULL) == 0;
}
545
547 return (Imm & 0xff00ULL) >> 8;
548}
549
551 uint64_t EncVal = Imm;
552 return (EncVal << 56) | (EncVal << 40) | (EncVal << 24) | (EncVal << 8);
553}
554
// 0x00 0x00 abcdefgh 0xFF 0x00 0x00 abcdefgh 0xFF
static inline bool isAdvSIMDModImmType7(uint64_t Imm) {
  // Both 32-bit halves must match: byte 0 is 0xFF, byte 1 carries the
  // immediate, bytes 2-3 are zero.
  const uint64_t Lo = Imm & 0xffffffffULL;
  return (Imm >> 32) == Lo && (Lo & 0xffff00ffULL) == 0x000000ffULL;
}
560
562 return (Imm & 0xff00ULL) >> 8;
563}
564
566 uint64_t EncVal = Imm;
567 return (EncVal << 40) | (EncVal << 8) | 0x000000ff000000ffULL;
568}
569
// 0x00 abcdefgh 0xFF 0xFF 0x00 abcdefgh 0xFF 0xFF
static inline bool isAdvSIMDModImmType8(uint64_t Imm) {
  // Both 32-bit halves must match: bytes 0-1 are 0xFF, byte 2 carries the
  // immediate, byte 3 is zero.
  const uint64_t Lo = Imm & 0xffffffffULL;
  return (Imm >> 32) == Lo && (Lo & 0xff00ffffULL) == 0x0000ffffULL;
}
575
577 uint64_t EncVal = Imm;
578 return (EncVal << 48) | (EncVal << 16) | 0x0000ffff0000ffffULL;
579}
580
582 return (Imm & 0x00ff0000ULL) >> 16;
583}
584
// abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
static inline bool isAdvSIMDModImmType9(uint64_t Imm) {
  // True iff every byte of Imm equals its lowest byte, i.e. Imm is the
  // lowest byte replicated across all eight lanes.
  return Imm == (Imm & 0xffULL) * 0x0101010101010101ULL;
}
591
593 return (Imm & 0xffULL);
594}
595
597 uint64_t EncVal = Imm;
598 EncVal |= (EncVal << 8);
599 EncVal |= (EncVal << 16);
600 EncVal |= (EncVal << 32);
601 return EncVal;
602}
603
// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
// cmode: 1110, op: 1
static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
#if defined(_MSC_VER) && _MSC_VER == 1937 && !defined(__clang__) &&            \
    defined(_M_ARM64)
  // The MSVC compiler 19.37 for ARM64 has an optimization bug that
  // causes an incorrect behavior with the original version. Work around
  // by using a slightly different variation.
  // https://developercommunity.visualstudio.com/t/C-ARM64-compiler-optimization-bug/10481261
  constexpr uint64_t Mask = 0xFFULL;
  uint64_t ByteA = (Imm >> 56) & Mask;
  uint64_t ByteB = (Imm >> 48) & Mask;
  uint64_t ByteC = (Imm >> 40) & Mask;
  uint64_t ByteD = (Imm >> 32) & Mask;
  uint64_t ByteE = (Imm >> 24) & Mask;
  uint64_t ByteF = (Imm >> 16) & Mask;
  uint64_t ByteG = (Imm >> 8) & Mask;
  uint64_t ByteH = Imm & Mask;

  return (ByteA == 0ULL || ByteA == Mask) && (ByteB == 0ULL || ByteB == Mask) &&
         (ByteC == 0ULL || ByteC == Mask) && (ByteD == 0ULL || ByteD == Mask) &&
         (ByteE == 0ULL || ByteE == Mask) && (ByteF == 0ULL || ByteF == Mask) &&
         (ByteG == 0ULL || ByteG == Mask) && (ByteH == 0ULL || ByteH == Mask);
#else
  // Every byte lane must be either all-zeros or all-ones.
  for (unsigned Shift = 0; Shift != 64; Shift += 8) {
    const uint64_t Byte = (Imm >> Shift) & 0xffULL;
    if (Byte != 0 && Byte != 0xffULL)
      return false;
  }
  return true;
#endif
}
647
649 uint8_t BitA = (Imm & 0xff00000000000000ULL) != 0;
650 uint8_t BitB = (Imm & 0x00ff000000000000ULL) != 0;
651 uint8_t BitC = (Imm & 0x0000ff0000000000ULL) != 0;
652 uint8_t BitD = (Imm & 0x000000ff00000000ULL) != 0;
653 uint8_t BitE = (Imm & 0x00000000ff000000ULL) != 0;
654 uint8_t BitF = (Imm & 0x0000000000ff0000ULL) != 0;
655 uint8_t BitG = (Imm & 0x000000000000ff00ULL) != 0;
656 uint8_t BitH = (Imm & 0x00000000000000ffULL) != 0;
657
658 uint8_t EncVal = BitA;
659 EncVal <<= 1;
660 EncVal |= BitB;
661 EncVal <<= 1;
662 EncVal |= BitC;
663 EncVal <<= 1;
664 EncVal |= BitD;
665 EncVal <<= 1;
666 EncVal |= BitE;
667 EncVal <<= 1;
668 EncVal |= BitF;
669 EncVal <<= 1;
670 EncVal |= BitG;
671 EncVal <<= 1;
672 EncVal |= BitH;
673 return EncVal;
674}
675
677 uint64_t EncVal = 0;
678 if (Imm & 0x80) EncVal |= 0xff00000000000000ULL;
679 if (Imm & 0x40) EncVal |= 0x00ff000000000000ULL;
680 if (Imm & 0x20) EncVal |= 0x0000ff0000000000ULL;
681 if (Imm & 0x10) EncVal |= 0x000000ff00000000ULL;
682 if (Imm & 0x08) EncVal |= 0x00000000ff000000ULL;
683 if (Imm & 0x04) EncVal |= 0x0000000000ff0000ULL;
684 if (Imm & 0x02) EncVal |= 0x000000000000ff00ULL;
685 if (Imm & 0x01) EncVal |= 0x00000000000000ffULL;
686 return EncVal;
687}
688
// aBbbbbbc defgh000 0x00 0x00 aBbbbbbc defgh000 0x00 0x00
static inline bool isAdvSIMDModImmType11(uint64_t Imm) {
  // Both 32-bit halves must match; bits {30-25} of the half (the "Bbbbbbc"
  // exponent field) must be 011111 or 100000, and bits {18-0} must be zero.
  const uint64_t Lo = Imm & 0xffffffffULL;
  const uint64_t ExpField = (Lo >> 25) & 0x3f;
  return (Imm >> 32) == Lo && (ExpField == 0x1f || ExpField == 0x20) &&
         (Lo & 0x0007ffffULL) == 0;
}
696
698 uint8_t BitA = (Imm & 0x80000000ULL) != 0;
699 uint8_t BitB = (Imm & 0x20000000ULL) != 0;
700 uint8_t BitC = (Imm & 0x01000000ULL) != 0;
701 uint8_t BitD = (Imm & 0x00800000ULL) != 0;
702 uint8_t BitE = (Imm & 0x00400000ULL) != 0;
703 uint8_t BitF = (Imm & 0x00200000ULL) != 0;
704 uint8_t BitG = (Imm & 0x00100000ULL) != 0;
705 uint8_t BitH = (Imm & 0x00080000ULL) != 0;
706
707 uint8_t EncVal = BitA;
708 EncVal <<= 1;
709 EncVal |= BitB;
710 EncVal <<= 1;
711 EncVal |= BitC;
712 EncVal <<= 1;
713 EncVal |= BitD;
714 EncVal <<= 1;
715 EncVal |= BitE;
716 EncVal <<= 1;
717 EncVal |= BitF;
718 EncVal <<= 1;
719 EncVal |= BitG;
720 EncVal <<= 1;
721 EncVal |= BitH;
722 return EncVal;
723}
724
726 uint64_t EncVal = 0;
727 if (Imm & 0x80) EncVal |= 0x80000000ULL;
728 if (Imm & 0x40) EncVal |= 0x3e000000ULL;
729 else EncVal |= 0x40000000ULL;
730 if (Imm & 0x20) EncVal |= 0x01000000ULL;
731 if (Imm & 0x10) EncVal |= 0x00800000ULL;
732 if (Imm & 0x08) EncVal |= 0x00400000ULL;
733 if (Imm & 0x04) EncVal |= 0x00200000ULL;
734 if (Imm & 0x02) EncVal |= 0x00100000ULL;
735 if (Imm & 0x01) EncVal |= 0x00080000ULL;
736 return (EncVal << 32) | EncVal;
737}
738
// aBbbbbbb bbcdefgh 0x00 0x00 0x00 0x00 0x00 0x00
static inline bool isAdvSIMDModImmType12(uint64_t Imm) {
  // Bits {62-54} (the "Bbbbbbbbb" exponent field) must be 011111111 or
  // 100000000, and the low 48 bits must be zero.
  const uint64_t ExpField = (Imm >> 54) & 0x1ff;
  return (ExpField == 0xff || ExpField == 0x100) &&
         (Imm & 0x0000ffffffffffffULL) == 0;
}
745
747 uint8_t BitA = (Imm & 0x8000000000000000ULL) != 0;
748 uint8_t BitB = (Imm & 0x0040000000000000ULL) != 0;
749 uint8_t BitC = (Imm & 0x0020000000000000ULL) != 0;
750 uint8_t BitD = (Imm & 0x0010000000000000ULL) != 0;
751 uint8_t BitE = (Imm & 0x0008000000000000ULL) != 0;
752 uint8_t BitF = (Imm & 0x0004000000000000ULL) != 0;
753 uint8_t BitG = (Imm & 0x0002000000000000ULL) != 0;
754 uint8_t BitH = (Imm & 0x0001000000000000ULL) != 0;
755
756 uint8_t EncVal = BitA;
757 EncVal <<= 1;
758 EncVal |= BitB;
759 EncVal <<= 1;
760 EncVal |= BitC;
761 EncVal <<= 1;
762 EncVal |= BitD;
763 EncVal <<= 1;
764 EncVal |= BitE;
765 EncVal <<= 1;
766 EncVal |= BitF;
767 EncVal <<= 1;
768 EncVal |= BitG;
769 EncVal <<= 1;
770 EncVal |= BitH;
771 return EncVal;
772}
773
775 uint64_t EncVal = 0;
776 if (Imm & 0x80) EncVal |= 0x8000000000000000ULL;
777 if (Imm & 0x40) EncVal |= 0x3fc0000000000000ULL;
778 else EncVal |= 0x4000000000000000ULL;
779 if (Imm & 0x20) EncVal |= 0x0020000000000000ULL;
780 if (Imm & 0x10) EncVal |= 0x0010000000000000ULL;
781 if (Imm & 0x08) EncVal |= 0x0008000000000000ULL;
782 if (Imm & 0x04) EncVal |= 0x0004000000000000ULL;
783 if (Imm & 0x02) EncVal |= 0x0002000000000000ULL;
784 if (Imm & 0x01) EncVal |= 0x0001000000000000ULL;
785 return EncVal;
786}
787
788/// Returns true if Imm is the concatenation of a repeating pattern of type T.
789template <typename T>
790static inline bool isSVEMaskOfIdenticalElements(int64_t Imm) {
791 auto Parts = bit_cast<std::array<T, sizeof(int64_t) / sizeof(T)>>(Imm);
792 return llvm::all_equal(Parts);
793}
794
/// Returns true if Imm is valid for CPY/DUP.
template <typename T>
static inline bool isSVECpyImm(int64_t Imm) {
  // Imm is interpreted as a signed value, which means the bits above the
  // width of T must be all ones (sign bits of a negative value passed in a
  // larger container) or all zeroes.
  const int64_t TopBits =
      ~int64_t(std::numeric_limits<std::make_unsigned_t<T>>::max());
  if ((Imm & TopBits) != 0 && (Imm & TopBits) != TopBits)
    return false;

  // A signed 8-bit immediate: it must survive truncation to int8_t.
  if (Imm & 0xff)
    return int8_t(Imm) == T(Imm);

  // A signed 16-bit immediate that is a multiple of 256: it must survive
  // truncation to int16_t.
  if (Imm & 0xff00)
    return int16_t(Imm) == T(Imm);

  return Imm == 0;
}
817
/// Returns true if Imm is valid for ADD/SUB.
template <typename T>
static inline bool isSVEAddSubImm(int64_t Imm) {
  // 8-bit elements only accept an unsigned 8-bit immediate; wider elements
  // additionally accept a 16-bit immediate whose low byte is zero.
  constexpr bool IsByteSized =
      std::is_same<int8_t, std::make_signed_t<T>>::value ||
      std::is_same<int8_t, T>::value;
  if (uint8_t(Imm) == Imm)
    return true;
  return !IsByteSized && uint16_t(Imm & ~0xff) == Imm;
}
825
826/// Return true if Imm is valid for DUPM and has no single CPY/DUP equivalent.
827static inline bool isSVEMoveMaskPreferredLogicalImmediate(int64_t Imm) {
828 if (isSVECpyImm<int64_t>(Imm))
829 return false;
830
831 auto S = bit_cast<std::array<int32_t, 2>>(Imm);
834
836 return false;
838 return false;
840 return false;
841 return isLogicalImmediate(Imm, 64);
842}
843
/// Returns true if Value can be materialized by a single MOVZ, i.e. it fits
/// entirely in one 16-bit chunk at a 16-bit-aligned position in the register.
inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) {
  for (int Shift = 0; Shift + 16 <= RegWidth; Shift += 16) {
    const uint64_t Chunk = 0xffffULL << Shift;
    if ((Value & ~Chunk) == 0)
      return true;
  }
  return false;
}
851
/// Returns true if "MOV Value, lsl #Shift" is an alias of MOVZ for the given
/// register width.
inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) {
  if (RegWidth == 32)
    Value = uint32_t(Value);

  // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
  if (Value == 0)
    return Shift == 0;

  return (Value & ~(0xffffULL << Shift)) == 0;
}
862
863inline static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) {
864 // MOVZ takes precedence over MOVN.
865 if (isAnyMOVZMovAlias(Value, RegWidth))
866 return false;
867
868 Value = ~Value;
869 if (RegWidth == 32)
870 Value &= 0xffffffffULL;
871
872 return isMOVZMovAlias(Value, Shift, RegWidth);
873}
874
875inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) {
876 if (isAnyMOVZMovAlias(Value, RegWidth))
877 return true;
878
879 // It's not a MOVZ, but it might be a MOVN.
880 Value = ~Value;
881 if (RegWidth == 32)
882 Value &= 0xffffffffULL;
883
884 return isAnyMOVZMovAlias(Value, RegWidth);
885}
886
/// Returns true (setting Imm and Shift) if Val is encodable as an SVE
/// CPY/DUP immediate for an element of SizeInBits bits: either a signed
/// 8-bit immediate (Shift = 0) or, for elements wider than 8 bits, a signed
/// 16-bit multiple of 256 (Shift = 8).
static inline bool isSVECpyDupImm(int SizeInBits, int64_t Val, int32_t &Imm,
                                  int32_t &Shift) {
  if (SizeInBits == 8) {
    // Every 8-bit value is representable directly.
    Shift = 0;
    Imm = Val & 0xFF;
    return true;
  }

  if (SizeInBits != 16 && SizeInBits != 32 && SizeInBits != 64)
    return false;

  // 8-bit signed immediate, no shift.
  if (Val >= -128 && Val <= 127) {
    Shift = 0;
    Imm = Val & 0xFF;
    return true;
  }

  // 16-bit signed immediate that is a multiple of 256, shifted left by 8.
  if (Val >= -32768 && Val <= 32512 && Val % 256 == 0) {
    Shift = 8;
    Imm = (Val >> 8) & 0xFF;
    return true;
  }

  return false;
}
916
917static inline bool isSVELogicalImm(unsigned SizeInBits, uint64_t ImmVal,
918 uint64_t &Encoding) {
919 // Shift mask depending on type size.
920 switch (SizeInBits) {
921 case 8:
922 ImmVal &= 0xFF;
923 ImmVal |= ImmVal << 8;
924 ImmVal |= ImmVal << 16;
925 ImmVal |= ImmVal << 32;
926 break;
927 case 16:
928 ImmVal &= 0xFFFF;
929 ImmVal |= ImmVal << 16;
930 ImmVal |= ImmVal << 32;
931 break;
932 case 32:
933 ImmVal &= 0xFFFFFFFF;
934 ImmVal |= ImmVal << 32;
935 break;
936 case 64:
937 break;
938 default:
939 llvm_unreachable("Unexpected size");
940 }
941
942 return processLogicalImmediate(ImmVal, 64, Encoding);
943}
944
945} // end namespace AArch64_AM
946
947} // end namespace llvm
948
949#endif
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define I(x, y, z)
Definition MD5.cpp:57
#define H(x, y, z)
Definition MD5.cpp:56
#define T
This file implements the C++20 <bit> header.
APInt bitcastToAPInt() const
Definition APFloat.h:1408
Class for arbitrary precision integers.
Definition APInt.h:78
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM Value Representation.
Definition Value.h:75
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AArch64_AM - AArch64 Addressing Mode Stuff.
static bool isValidDecodeLogicalImmediate(uint64_t val, unsigned regSize)
isValidDecodeLogicalImmediate - Check to see if the logical immediate value in the form "N:immr:imms"...
static bool isSVEMoveMaskPreferredLogicalImmediate(int64_t Imm)
Return true if Imm is valid for DUPM and has no single CPY/DUP equivalent.
static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize)
decodeLogicalImmediate - Decode a logical immediate value in the form "N:immr:imms" (where the immr a...
static unsigned getMemExtendImm(AArch64_AM::ShiftExtendType ET, bool DoShift)
getExtendImm - Encode the extend type and amount for a load/store inst: doshift: should the offset be...
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t decodeAdvSIMDModImmType4(uint8_t Imm)
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static uint8_t encodeAdvSIMDModImmType2(uint64_t Imm)
static bool isSVEAddSubImm(int64_t Imm)
Returns true if Imm is valid for ADD/SUB.
static bool processLogicalImmediate(uint64_t Imm, unsigned RegSize, uint64_t &Encoding)
processLogicalImmediate - Determine if an immediate value can be encoded as the immediate operand of ...
static bool isAdvSIMDModImmType9(uint64_t Imm)
static uint64_t decodeAdvSIMDModImmType2(uint8_t Imm)
static bool isAdvSIMDModImmType4(uint64_t Imm)
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static uint64_t decodeAdvSIMDModImmType12(uint8_t Imm)
static bool isAdvSIMDModImmType5(uint64_t Imm)
static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth)
static unsigned getArithShiftValue(unsigned Imm)
getArithShiftValue - get the arithmetic shift value.
static uint64_t decodeAdvSIMDModImmType11(uint8_t Imm)
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
static float getFPImmFloat(unsigned Imm)
static AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm)
getExtendType - Extract the extend type for the offset operand of loads/stores.
static uint8_t encodeAdvSIMDModImmType7(uint64_t Imm)
static uint64_t decodeAdvSIMDModImmType1(uint8_t Imm)
static uint8_t encodeAdvSIMDModImmType12(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType9(uint64_t Imm)
static bool isSVEMaskOfIdenticalElements(int64_t Imm)
Returns true if Imm is the concatenation of a repeating pattern of type T.
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftExtendName - Get the string encoding for the shift type.
static bool isAdvSIMDModImmType7(uint64_t Imm)
static uint64_t decodeAdvSIMDModImmType3(uint8_t Imm)
static uint64_t decodeAdvSIMDModImmType7(uint8_t Imm)
unsigned getExtendEncoding(AArch64_AM::ShiftExtendType ET)
Mapping from extend bits to required operation: shifter: 000 ==> uxtb 001 ==> uxth 010 ==> uxtw 011 =...
static bool isSVECpyImm(int64_t Imm)
Returns true if Imm is valid for CPY/DUP.
static uint8_t encodeAdvSIMDModImmType5(uint64_t Imm)
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static uint64_t ror(uint64_t elt, unsigned size)
static bool isAdvSIMDModImmType10(uint64_t Imm)
static AArch64_AM::ShiftExtendType getExtendType(unsigned Imm)
getExtendType - Extract the extend type for operands of arithmetic ops.
static int getFP16Imm(const APInt &Imm)
getFP16Imm - Return an 8-bit floating-point version of the 16-bit floating-point value.
static uint64_t decodeAdvSIMDModImmType9(uint8_t Imm)
static uint64_t decodeAdvSIMDModImmType10(uint8_t Imm)
static uint64_t decodeAdvSIMDModImmType5(uint8_t Imm)
static uint64_t decodeAdvSIMDModImmType8(uint8_t Imm)
static uint8_t encodeAdvSIMDModImmType8(uint64_t Imm)
static bool isAdvSIMDModImmType12(uint64_t Imm)
static bool isSVELogicalImm(unsigned SizeInBits, uint64_t ImmVal, uint64_t &Encoding)
static uint8_t encodeAdvSIMDModImmType11(uint64_t Imm)
static AArch64_AM::ShiftExtendType getArithExtendType(unsigned Imm)
static bool isSVECpyDupImm(int SizeInBits, int64_t Val, int32_t &Imm, int32_t &Shift)
static bool isAdvSIMDModImmType11(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType6(uint64_t Imm)
static AArch64_AM::ShiftExtendType getShiftType(unsigned Imm)
getShiftType - Extract the shift type.
static bool isAdvSIMDModImmType8(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType4(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
static bool isAdvSIMDModImmType6(uint64_t Imm)
static bool getMemDoShift(unsigned Imm)
getMemDoShift - Extract the "do shift" flag value for load/store instructions.
static uint8_t encodeAdvSIMDModImmType1(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType3(uint64_t Imm)
static bool isAdvSIMDModImmType2(uint64_t Imm)
static uint64_t decodeAdvSIMDModImmType6(uint8_t Imm)
static bool isAdvSIMDModImmType3(uint64_t Imm)
static bool isSignExtendShiftType(AArch64_AM::ShiftExtendType Type)
isSignExtendShiftType - Returns true if Type is sign extending.
static bool isAdvSIMDModImmType1(uint64_t Imm)
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1669
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:293
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition MathExtras.h:273
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Definition bit.h:236
int countl_one(T Value)
Count the number of ones from the most significant bit to the first zero bit.
Definition bit.h:280
To bit_cast(const From &from) noexcept
Definition bit.h:90
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2166
#define N