AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
67enum class RegKind {
68 Scalar,
69 NeonVector,
70 SVEDataVector,
71 SVEPredicateAsCounter,
72 SVEPredicateVector,
73 Matrix,
74 LookupTable
75};
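// Illustrative assembly spellings for each kind: "x0" (Scalar), "v0.8b"
// (NeonVector), "z3.s" (SVEDataVector), "pn8" (SVEPredicateAsCounter),
// "p0" (SVEPredicateVector), "za0h.s" (Matrix), "zt0" (LookupTable).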
76
77enum class MatrixKind { Array, Tile, Row, Col };
78
79enum RegConstraintEqualityTy {
80 EqualsReg,
81 EqualsSuperReg,
82 EqualsSubReg
83};
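// Describes how a tied register operand must relate to the register it is
// constrained against: exactly equal, or equal to its super-/sub-register,
// e.g. for the GPR64as32 and GPR32as64 operands handled below.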
84
85class AArch64AsmParser : public MCTargetAsmParser {
86private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
89 // Map of register aliases registered via the .req directive.
90 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
91
92 class PrefixInfo {
93 public:
94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
109 "No destructive element size set for movprfx");
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
121 "No destructive element size set for movprfx");
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
133 bool isActive() const { return Active; }
134 bool isPredicated() const { return Predicated; }
135 unsigned getElementSize() const {
136 assert(Predicated);
137 return ElementSize;
138 }
139 MCRegister getDstReg() const { return Dst; }
140 MCRegister getPgReg() const {
141 assert(Predicated);
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
148 unsigned ElementSize;
149 MCRegister Dst;
150 MCRegister Pg;
151 } NextPrefix;
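// NextPrefix records a pending MOVPRFX so that validateInstruction() can
// check the following instruction is a compatible destructive operation
// (same destination and, if predicated, the same predicate and element size).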
152
153 AArch64TargetStreamer &getTargetStreamer() {
154 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
163 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
164 std::string &Suggestion);
165 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
166 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
167 bool parseRegister(OperandVector &Operands);
168 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
169 bool parseNeonVectorList(OperandVector &Operands);
170 bool parseOptionalMulOperand(OperandVector &Operands);
171 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
172 bool parseKeywordOperand(OperandVector &Operands);
173 bool parseOperand(OperandVector &Operands, bool isCondCode,
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
176 bool parseComma();
177 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
178 unsigned Last);
179
180 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
181 OperandVector &Operands);
182
183 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
184
185 bool parseDirectiveArch(SMLoc L);
186 bool parseDirectiveArchExtension(SMLoc L);
187 bool parseDirectiveCPU(SMLoc L);
188 bool parseDirectiveInst(SMLoc L);
189
190 bool parseDirectiveTLSDescCall(SMLoc L);
191
192 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
193 bool parseDirectiveLtorg(SMLoc L);
194
195 bool parseDirectiveReq(StringRef Name, SMLoc L);
196 bool parseDirectiveUnreq(SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFINegateRAStateWithPC();
199 bool parseDirectiveCFIBKeyFrame();
200 bool parseDirectiveCFIMTETaggedFrame();
201
202 bool parseDirectiveVariantPCS(SMLoc L);
203
204 bool parseDirectiveSEHAllocStack(SMLoc L);
205 bool parseDirectiveSEHPrologEnd(SMLoc L);
206 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
207 bool parseDirectiveSEHSaveFPLR(SMLoc L);
208 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
209 bool parseDirectiveSEHSaveReg(SMLoc L);
210 bool parseDirectiveSEHSaveRegX(SMLoc L);
211 bool parseDirectiveSEHSaveRegP(SMLoc L);
212 bool parseDirectiveSEHSaveRegPX(SMLoc L);
213 bool parseDirectiveSEHSaveLRPair(SMLoc L);
214 bool parseDirectiveSEHSaveFReg(SMLoc L);
215 bool parseDirectiveSEHSaveFRegX(SMLoc L);
216 bool parseDirectiveSEHSaveFRegP(SMLoc L);
217 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
218 bool parseDirectiveSEHSetFP(SMLoc L);
219 bool parseDirectiveSEHAddFP(SMLoc L);
220 bool parseDirectiveSEHNop(SMLoc L);
221 bool parseDirectiveSEHSaveNext(SMLoc L);
222 bool parseDirectiveSEHEpilogStart(SMLoc L);
223 bool parseDirectiveSEHEpilogEnd(SMLoc L);
224 bool parseDirectiveSEHTrapFrame(SMLoc L);
225 bool parseDirectiveSEHMachineFrame(SMLoc L);
226 bool parseDirectiveSEHContext(SMLoc L);
227 bool parseDirectiveSEHECContext(SMLoc L);
228 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
229 bool parseDirectiveSEHPACSignLR(SMLoc L);
230 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
231 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
232 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
233
234 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
235 SmallVectorImpl<SMLoc> &Loc);
236 unsigned getNumRegsForRegKind(RegKind K);
237 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
238 OperandVector &Operands, MCStreamer &Out,
239 uint64_t &ErrorInfo,
240 bool MatchingInlineAsm) override;
241 /// @name Auto-generated Match Functions
242 /// {
243
244#define GET_ASSEMBLER_HEADER
245#include "AArch64GenAsmMatcher.inc"
246
247 /// }
248
249 ParseStatus tryParseScalarRegister(MCRegister &Reg);
250 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
251 RegKind MatchKind);
252 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
253 ParseStatus tryParseSVCR(OperandVector &Operands);
254 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
255 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
256 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
257 ParseStatus tryParseSysReg(OperandVector &Operands);
258 ParseStatus tryParseSysCROperand(OperandVector &Operands);
259 template <bool IsSVEPrefetch = false>
260 ParseStatus tryParsePrefetch(OperandVector &Operands);
261 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
262 ParseStatus tryParsePSBHint(OperandVector &Operands);
263 ParseStatus tryParseBTIHint(OperandVector &Operands);
264 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
265 ParseStatus tryParseAdrLabel(OperandVector &Operands);
266 template <bool AddFPZeroAsLiteral>
267 ParseStatus tryParseFPImm(OperandVector &Operands);
268 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
269 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
270 bool tryParseNeonVectorRegister(OperandVector &Operands);
271 ParseStatus tryParseVectorIndex(OperandVector &Operands);
272 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
273 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
274 template <bool ParseShiftExtend,
275 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
276 ParseStatus tryParseGPROperand(OperandVector &Operands);
277 ParseStatus tryParseZTOperand(OperandVector &Operands);
278 template <bool ParseShiftExtend, bool ParseSuffix>
279 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
280 template <RegKind RK>
281 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
282 ParseStatus
283 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
284 template <RegKind VectorKind>
285 ParseStatus tryParseVectorList(OperandVector &Operands,
286 bool ExpectMatch = false);
287 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
288 ParseStatus tryParseSVEPattern(OperandVector &Operands);
289 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
290 ParseStatus tryParseGPR64x8(OperandVector &Operands);
291 ParseStatus tryParseImmRange(OperandVector &Operands);
292 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
293 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
294
295public:
296 enum AArch64MatchResultTy {
297 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
298#define GET_OPERAND_DIAGNOSTIC_TYPES
299#include "AArch64GenAsmMatcher.inc"
300 };
301 bool IsILP32;
302 bool IsWindowsArm64EC;
303
304 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
305 const MCInstrInfo &MII, const MCTargetOptions &Options)
306 : MCTargetAsmParser(Options, STI, MII) {
307 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
308 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
309 MCAsmParserExtension::Initialize(Parser);
310 MCStreamer &S = getParser().getStreamer();
311 if (S.getTargetStreamer() == nullptr)
312 new AArch64TargetStreamer(S);
313
314 // Alias .hword/.word/.[dx]word to the target-independent
315 // .2byte/.4byte/.8byte directives as they have the same form and
316 // semantics:
317 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
318 Parser.addAliasForDirective(".hword", ".2byte");
319 Parser.addAliasForDirective(".word", ".4byte");
320 Parser.addAliasForDirective(".dword", ".8byte");
321 Parser.addAliasForDirective(".xword", ".8byte");
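// e.g. ".xword 0x1122334455667788" now emits exactly the same bytes as
// ".8byte 0x1122334455667788".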
322
323 // Initialize the set of available features.
324 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
325 }
326
327 bool areEqualRegs(const MCParsedAsmOperand &Op1,
328 const MCParsedAsmOperand &Op2) const override;
329 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
330 SMLoc NameLoc, OperandVector &Operands) override;
331 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
332 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
333 SMLoc &EndLoc) override;
334 bool ParseDirective(AsmToken DirectiveID) override;
335 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
336 unsigned Kind) override;
337
338 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
339
340 static bool classifySymbolRef(const MCExpr *Expr,
341 AArch64MCExpr::VariantKind &ELFRefKind,
342 MCSymbolRefExpr::VariantKind &DarwinRefKind,
343 int64_t &Addend);
344};
345
346/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
347/// instruction.
348class AArch64Operand : public MCParsedAsmOperand {
349private:
350 enum KindTy {
351 k_Immediate,
352 k_ShiftedImm,
353 k_ImmRange,
354 k_CondCode,
355 k_Register,
356 k_MatrixRegister,
357 k_MatrixTileList,
358 k_SVCR,
359 k_VectorList,
360 k_VectorIndex,
361 k_Token,
362 k_SysReg,
363 k_SysCR,
364 k_Prefetch,
365 k_ShiftExtend,
366 k_FPImm,
367 k_Barrier,
368 k_PSBHint,
369 k_PHint,
370 k_BTIHint,
371 } Kind;
372
373 SMLoc StartLoc, EndLoc;
374
375 struct TokOp {
376 const char *Data;
377 unsigned Length;
378 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
379 };
380
381 // Separate shift/extend operand.
382 struct ShiftExtendOp {
383 AArch64_AM::ShiftExtendType Type;
384 unsigned Amount;
385 bool HasExplicitAmount;
386 };
387
388 struct RegOp {
389 unsigned RegNum;
390 RegKind Kind;
391 int ElementWidth;
392
393 // The register may be allowed as a different register class,
394 // e.g. for GPR64as32 or GPR32as64.
395 RegConstraintEqualityTy EqualityTy;
396
397 // In some cases the shift/extend needs to be explicitly parsed together
398 // with the register, rather than as a separate operand. This is needed
399 // for addressing modes where the instruction as a whole dictates the
400 // scaling/extend, rather than specific bits in the instruction.
401 // By parsing them as a single operand, we avoid the need to pass an
402 // extra operand in all CodeGen patterns (because all operands need to
403 // have an associated value), and we avoid the need to update TableGen to
404 // accept operands that have no associated bits in the instruction.
405 //
406 // An added benefit of parsing them together is that the assembler
407 // can give a sensible diagnostic if the scaling is not correct.
408 //
409 // The default is 'lsl #0' (HasExplicitAmount = false) if no
410 // ShiftExtend is specified.
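// For example, in "ldr x0, [x1, w2, uxtw #2]" the "w2, uxtw #2" part is
// parsed as a single register-with-extend operand.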
411 ShiftExtendOp ShiftExtend;
412 };
413
414 struct MatrixRegOp {
415 unsigned RegNum;
416 unsigned ElementWidth;
417 MatrixKind Kind;
418 };
419
420 struct MatrixTileListOp {
421 unsigned RegMask = 0;
422 };
423
424 struct VectorListOp {
425 unsigned RegNum;
426 unsigned Count;
427 unsigned Stride;
428 unsigned NumElements;
429 unsigned ElementWidth;
430 RegKind RegisterKind;
431 };
432
433 struct VectorIndexOp {
434 int Val;
435 };
436
437 struct ImmOp {
438 const MCExpr *Val;
439 };
440
441 struct ShiftedImmOp {
442 const MCExpr *Val;
443 unsigned ShiftAmount;
444 };
445
446 struct ImmRangeOp {
447 unsigned First;
448 unsigned Last;
449 };
450
451 struct CondCodeOp {
452 AArch64CC::CondCode Code;
453 };
454
455 struct FPImmOp {
456 uint64_t Val; // APFloat value bitcasted to uint64_t.
457 bool IsExact; // describes whether parsed value was exact.
458 };
459
460 struct BarrierOp {
461 const char *Data;
462 unsigned Length;
463 unsigned Val; // Not the enum since not all values have names.
464 bool HasnXSModifier;
465 };
466
467 struct SysRegOp {
468 const char *Data;
469 unsigned Length;
470 uint32_t MRSReg;
471 uint32_t MSRReg;
472 uint32_t PStateField;
473 };
474
475 struct SysCRImmOp {
476 unsigned Val;
477 };
478
479 struct PrefetchOp {
480 const char *Data;
481 unsigned Length;
482 unsigned Val;
483 };
484
485 struct PSBHintOp {
486 const char *Data;
487 unsigned Length;
488 unsigned Val;
489 };
490 struct PHintOp {
491 const char *Data;
492 unsigned Length;
493 unsigned Val;
494 };
495 struct BTIHintOp {
496 const char *Data;
497 unsigned Length;
498 unsigned Val;
499 };
500
501 struct SVCROp {
502 const char *Data;
503 unsigned Length;
504 unsigned PStateField;
505 };
506
507 union {
508 struct TokOp Tok;
509 struct RegOp Reg;
510 struct MatrixRegOp MatrixReg;
511 struct MatrixTileListOp MatrixTileList;
512 struct VectorListOp VectorList;
513 struct VectorIndexOp VectorIndex;
514 struct ImmOp Imm;
515 struct ShiftedImmOp ShiftedImm;
516 struct ImmRangeOp ImmRange;
517 struct CondCodeOp CondCode;
518 struct FPImmOp FPImm;
519 struct BarrierOp Barrier;
520 struct SysRegOp SysReg;
521 struct SysCRImmOp SysCRImm;
522 struct PrefetchOp Prefetch;
523 struct PSBHintOp PSBHint;
524 struct PHintOp PHint;
525 struct BTIHintOp BTIHint;
526 struct ShiftExtendOp ShiftExtend;
527 struct SVCROp SVCR;
528 };
529
530 // Keep the MCContext around as the MCExprs may need to be manipulated during
531 // the add<>Operands() calls.
532 MCContext &Ctx;
533
534public:
535 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
536
537 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
538 Kind = o.Kind;
539 StartLoc = o.StartLoc;
540 EndLoc = o.EndLoc;
541 switch (Kind) {
542 case k_Token:
543 Tok = o.Tok;
544 break;
545 case k_Immediate:
546 Imm = o.Imm;
547 break;
548 case k_ShiftedImm:
549 ShiftedImm = o.ShiftedImm;
550 break;
551 case k_ImmRange:
552 ImmRange = o.ImmRange;
553 break;
554 case k_CondCode:
555 CondCode = o.CondCode;
556 break;
557 case k_FPImm:
558 FPImm = o.FPImm;
559 break;
560 case k_Barrier:
561 Barrier = o.Barrier;
562 break;
563 case k_Register:
564 Reg = o.Reg;
565 break;
566 case k_MatrixRegister:
567 MatrixReg = o.MatrixReg;
568 break;
569 case k_MatrixTileList:
570 MatrixTileList = o.MatrixTileList;
571 break;
572 case k_VectorList:
573 VectorList = o.VectorList;
574 break;
575 case k_VectorIndex:
576 VectorIndex = o.VectorIndex;
577 break;
578 case k_SysReg:
579 SysReg = o.SysReg;
580 break;
581 case k_SysCR:
582 SysCRImm = o.SysCRImm;
583 break;
584 case k_Prefetch:
585 Prefetch = o.Prefetch;
586 break;
587 case k_PSBHint:
588 PSBHint = o.PSBHint;
589 break;
590 case k_PHint:
591 PHint = o.PHint;
592 break;
593 case k_BTIHint:
594 BTIHint = o.BTIHint;
595 break;
596 case k_ShiftExtend:
597 ShiftExtend = o.ShiftExtend;
598 break;
599 case k_SVCR:
600 SVCR = o.SVCR;
601 break;
602 }
603 }
604
605 /// getStartLoc - Get the location of the first token of this operand.
606 SMLoc getStartLoc() const override { return StartLoc; }
607 /// getEndLoc - Get the location of the last token of this operand.
608 SMLoc getEndLoc() const override { return EndLoc; }
609
610 StringRef getToken() const {
611 assert(Kind == k_Token && "Invalid access!");
612 return StringRef(Tok.Data, Tok.Length);
613 }
614
615 bool isTokenSuffix() const {
616 assert(Kind == k_Token && "Invalid access!");
617 return Tok.IsSuffix;
618 }
619
620 const MCExpr *getImm() const {
621 assert(Kind == k_Immediate && "Invalid access!");
622 return Imm.Val;
623 }
624
625 const MCExpr *getShiftedImmVal() const {
626 assert(Kind == k_ShiftedImm && "Invalid access!");
627 return ShiftedImm.Val;
628 }
629
630 unsigned getShiftedImmShift() const {
631 assert(Kind == k_ShiftedImm && "Invalid access!");
632 return ShiftedImm.ShiftAmount;
633 }
634
635 unsigned getFirstImmVal() const {
636 assert(Kind == k_ImmRange && "Invalid access!");
637 return ImmRange.First;
638 }
639
640 unsigned getLastImmVal() const {
641 assert(Kind == k_ImmRange && "Invalid access!");
642 return ImmRange.Last;
643 }
644
645 AArch64CC::CondCode getCondCode() const {
646 assert(Kind == k_CondCode && "Invalid access!");
647 return CondCode.Code;
648 }
649
650 APFloat getFPImm() const {
651 assert (Kind == k_FPImm && "Invalid access!");
652 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
653 }
654
655 bool getFPImmIsExact() const {
656 assert (Kind == k_FPImm && "Invalid access!");
657 return FPImm.IsExact;
658 }
659
660 unsigned getBarrier() const {
661 assert(Kind == k_Barrier && "Invalid access!");
662 return Barrier.Val;
663 }
664
665 StringRef getBarrierName() const {
666 assert(Kind == k_Barrier && "Invalid access!");
667 return StringRef(Barrier.Data, Barrier.Length);
668 }
669
670 bool getBarriernXSModifier() const {
671 assert(Kind == k_Barrier && "Invalid access!");
672 return Barrier.HasnXSModifier;
673 }
674
675 MCRegister getReg() const override {
676 assert(Kind == k_Register && "Invalid access!");
677 return Reg.RegNum;
678 }
679
680 unsigned getMatrixReg() const {
681 assert(Kind == k_MatrixRegister && "Invalid access!");
682 return MatrixReg.RegNum;
683 }
684
685 unsigned getMatrixElementWidth() const {
686 assert(Kind == k_MatrixRegister && "Invalid access!");
687 return MatrixReg.ElementWidth;
688 }
689
690 MatrixKind getMatrixKind() const {
691 assert(Kind == k_MatrixRegister && "Invalid access!");
692 return MatrixReg.Kind;
693 }
694
695 unsigned getMatrixTileListRegMask() const {
696 assert(isMatrixTileList() && "Invalid access!");
697 return MatrixTileList.RegMask;
698 }
699
700 RegConstraintEqualityTy getRegEqualityTy() const {
701 assert(Kind == k_Register && "Invalid access!");
702 return Reg.EqualityTy;
703 }
704
705 unsigned getVectorListStart() const {
706 assert(Kind == k_VectorList && "Invalid access!");
707 return VectorList.RegNum;
708 }
709
710 unsigned getVectorListCount() const {
711 assert(Kind == k_VectorList && "Invalid access!");
712 return VectorList.Count;
713 }
714
715 unsigned getVectorListStride() const {
716 assert(Kind == k_VectorList && "Invalid access!");
717 return VectorList.Stride;
718 }
719
720 int getVectorIndex() const {
721 assert(Kind == k_VectorIndex && "Invalid access!");
722 return VectorIndex.Val;
723 }
724
725 StringRef getSysReg() const {
726 assert(Kind == k_SysReg && "Invalid access!");
727 return StringRef(SysReg.Data, SysReg.Length);
728 }
729
730 unsigned getSysCR() const {
731 assert(Kind == k_SysCR && "Invalid access!");
732 return SysCRImm.Val;
733 }
734
735 unsigned getPrefetch() const {
736 assert(Kind == k_Prefetch && "Invalid access!");
737 return Prefetch.Val;
738 }
739
740 unsigned getPSBHint() const {
741 assert(Kind == k_PSBHint && "Invalid access!");
742 return PSBHint.Val;
743 }
744
745 unsigned getPHint() const {
746 assert(Kind == k_PHint && "Invalid access!");
747 return PHint.Val;
748 }
749
750 StringRef getPSBHintName() const {
751 assert(Kind == k_PSBHint && "Invalid access!");
752 return StringRef(PSBHint.Data, PSBHint.Length);
753 }
754
755 StringRef getPHintName() const {
756 assert(Kind == k_PHint && "Invalid access!");
757 return StringRef(PHint.Data, PHint.Length);
758 }
759
760 unsigned getBTIHint() const {
761 assert(Kind == k_BTIHint && "Invalid access!");
762 return BTIHint.Val;
763 }
764
765 StringRef getBTIHintName() const {
766 assert(Kind == k_BTIHint && "Invalid access!");
767 return StringRef(BTIHint.Data, BTIHint.Length);
768 }
769
770 StringRef getSVCR() const {
771 assert(Kind == k_SVCR && "Invalid access!");
772 return StringRef(SVCR.Data, SVCR.Length);
773 }
774
775 StringRef getPrefetchName() const {
776 assert(Kind == k_Prefetch && "Invalid access!");
777 return StringRef(Prefetch.Data, Prefetch.Length);
778 }
779
780 AArch64_AM::ShiftExtendType getShiftExtendType() const {
781 if (Kind == k_ShiftExtend)
782 return ShiftExtend.Type;
783 if (Kind == k_Register)
784 return Reg.ShiftExtend.Type;
785 llvm_unreachable("Invalid access!");
786 }
787
788 unsigned getShiftExtendAmount() const {
789 if (Kind == k_ShiftExtend)
790 return ShiftExtend.Amount;
791 if (Kind == k_Register)
792 return Reg.ShiftExtend.Amount;
793 llvm_unreachable("Invalid access!");
794 }
795
796 bool hasShiftExtendAmount() const {
797 if (Kind == k_ShiftExtend)
798 return ShiftExtend.HasExplicitAmount;
799 if (Kind == k_Register)
800 return Reg.ShiftExtend.HasExplicitAmount;
801 llvm_unreachable("Invalid access!");
802 }
803
804 bool isImm() const override { return Kind == k_Immediate; }
805 bool isMem() const override { return false; }
806
807 bool isUImm6() const {
808 if (!isImm())
809 return false;
810 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
811 if (!MCE)
812 return false;
813 int64_t Val = MCE->getValue();
814 return (Val >= 0 && Val < 64);
815 }
816
817 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
818
819 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
820 return isImmScaled<Bits, Scale>(true);
821 }
822
823 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
824 DiagnosticPredicate isUImmScaled() const {
825 if (IsRange && isImmRange() &&
826 (getLastImmVal() != getFirstImmVal() + Offset))
827 return DiagnosticPredicateTy::NoMatch;
828
829 return isImmScaled<Bits, Scale, IsRange>(false);
830 }
831
832 template <int Bits, int Scale, bool IsRange = false>
833 DiagnosticPredicate isImmScaled(bool Signed) const {
834 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
835 (isImmRange() && !IsRange))
836 return DiagnosticPredicateTy::NoMatch;
837
838 int64_t Val;
839 if (isImmRange())
840 Val = getFirstImmVal();
841 else {
842 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
843 if (!MCE)
844 return DiagnosticPredicateTy::NoMatch;
845 Val = MCE->getValue();
846 }
847
848 int64_t MinVal, MaxVal;
849 if (Signed) {
850 int64_t Shift = Bits - 1;
851 MinVal = (int64_t(1) << Shift) * -Scale;
852 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
853 } else {
854 MinVal = 0;
855 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
856 }
857
858 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
859 return DiagnosticPredicateTy::Match;
860
861 return DiagnosticPredicateTy::NearMatch;
862 }
863
864 DiagnosticPredicate isSVEPattern() const {
865 if (!isImm())
866 return DiagnosticPredicateTy::NoMatch;
867 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
868 if (!MCE)
869 return DiagnosticPredicateTy::NoMatch;
870 int64_t Val = MCE->getValue();
871 if (Val >= 0 && Val < 32)
872 return DiagnosticPredicateTy::Match;
873 return DiagnosticPredicateTy::NearMatch;
874 }
875
876 DiagnosticPredicate isSVEVecLenSpecifier() const {
877 if (!isImm())
878 return DiagnosticPredicateTy::NoMatch;
879 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
880 if (!MCE)
881 return DiagnosticPredicateTy::NoMatch;
882 int64_t Val = MCE->getValue();
883 if (Val >= 0 && Val <= 1)
884 return DiagnosticPredicateTy::Match;
885 return DiagnosticPredicateTy::NearMatch;
886 }
887
888 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
889 AArch64MCExpr::VariantKind ELFRefKind;
890 MCSymbolRefExpr::VariantKind DarwinRefKind;
891 int64_t Addend;
892 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
893 Addend)) {
894 // If we don't understand the expression, assume the best and
895 // let the fixup and relocation code deal with it.
896 return true;
897 }
898
899 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
900 ELFRefKind == AArch64MCExpr::VK_LO12 ||
901 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
902 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
903 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
904 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
905 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
906 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
907 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
908 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
909 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
910 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
911 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
912 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
913 // Note that we don't range-check the addend. It's adjusted modulo page
914 // size when converted, so there is no "out of range" condition when using
915 // @pageoff.
916 return true;
917 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
918 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
919 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
920 return Addend == 0;
921 }
922
923 return false;
924 }
925
926 template <int Scale> bool isUImm12Offset() const {
927 if (!isImm())
928 return false;
929
930 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
931 if (!MCE)
932 return isSymbolicUImm12Offset(getImm());
933
934 int64_t Val = MCE->getValue();
935 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
936 }
937
938 template <int N, int M>
939 bool isImmInRange() const {
940 if (!isImm())
941 return false;
942 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
943 if (!MCE)
944 return false;
945 int64_t Val = MCE->getValue();
946 return (Val >= N && Val <= M);
947 }
948
949 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
950 // a logical immediate can always be represented when inverted.
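// e.g. 0x00ff00ff00ff00ff is a valid 64-bit logical immediate, and so is its
// bitwise NOT 0xff00ff00ff00ff00.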
951 template <typename T>
952 bool isLogicalImm() const {
953 if (!isImm())
954 return false;
955 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
956 if (!MCE)
957 return false;
958
959 int64_t Val = MCE->getValue();
960 // Avoid left shift by 64 directly.
961 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
962 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
963 if ((Val & Upper) && (Val & Upper) != Upper)
964 return false;
965
966 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
967 }
968
969 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
970
971 bool isImmRange() const { return Kind == k_ImmRange; }
972
973 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
974 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
975 /// immediate that can be shifted by 'Shift'.
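/// For example, getShiftedVal<12>() yields (3, 12) for the plain immediate
/// #0x3000 and (42, 0) for #42.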
976 template <unsigned Width>
977 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
978 if (isShiftedImm() && Width == getShiftedImmShift())
979 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
980 return std::make_pair(CE->getValue(), Width);
981
982 if (isImm())
983 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
984 int64_t Val = CE->getValue();
985 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
986 return std::make_pair(Val >> Width, Width);
987 else
988 return std::make_pair(Val, 0u);
989 }
990
991 return {};
992 }
993
994 bool isAddSubImm() const {
995 if (!isShiftedImm() && !isImm())
996 return false;
997
998 const MCExpr *Expr;
999
1000 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
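// e.g. "add x0, x1, #4095" and "add x0, x1, #1, lsl #12" are both encodable,
// while "add x0, x1, #1, lsl #3" is not.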
1001 if (isShiftedImm()) {
1002 unsigned Shift = ShiftedImm.ShiftAmount;
1003 Expr = ShiftedImm.Val;
1004 if (Shift != 0 && Shift != 12)
1005 return false;
1006 } else {
1007 Expr = getImm();
1008 }
1009
1010 AArch64MCExpr::VariantKind ELFRefKind;
1011 MCSymbolRefExpr::VariantKind DarwinRefKind;
1012 int64_t Addend;
1013 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
1014 DarwinRefKind, Addend)) {
1015 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
1016 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
1017 (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) ||
1018 ELFRefKind == AArch64MCExpr::VK_LO12 ||
1019 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
1020 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
1021 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
1022 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
1023 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
1024 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
1025 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
1026 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
1027 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
1028 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
1029 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
1030 }
1031
1032 // If it's a constant, it should be a real immediate in range.
1033 if (auto ShiftedVal = getShiftedVal<12>())
1034 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1035
1036 // If it's an expression, we hope for the best and let the fixup/relocation
1037 // code deal with it.
1038 return true;
1039 }
1040
1041 bool isAddSubImmNeg() const {
1042 if (!isShiftedImm() && !isImm())
1043 return false;
1044
1045 // Otherwise it should be a real negative immediate in range.
1046 if (auto ShiftedVal = getShiftedVal<12>())
1047 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1048
1049 return false;
1050 }
1051
1052 // Signed value in the range -128 to +127. For element widths of
1053 // 16 bits or higher it may also be a signed multiple of 256 in the
1054 // range -32768 to +32512.
1055 // For element-width of 8 bits a range of -128 to 255 is accepted,
1056 // since a copy of a byte can be either signed/unsigned.
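// e.g. for 16-bit elements, #-128, #127 and #0x7f00 (127 << 8) are all
// accepted, while #257 is not.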
1057 template <typename T>
1058 DiagnosticPredicate isSVECpyImm() const {
1059 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1060 return DiagnosticPredicateTy::NoMatch;
1061
1062 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1063 std::is_same<int8_t, T>::value;
1064 if (auto ShiftedImm = getShiftedVal<8>())
1065 if (!(IsByte && ShiftedImm->second) &&
1066 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1067 << ShiftedImm->second))
1068 return DiagnosticPredicateTy::Match;
1069
1070 return DiagnosticPredicateTy::NearMatch;
1071 }
1072
1073 // Unsigned value in the range 0 to 255. For element widths of
1074 // 16 bits or higher it may also be a signed multiple of 256 in the
1075 // range 0 to 65280.
1076 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1077 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1078 return DiagnosticPredicateTy::NoMatch;
1079
1080 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1081 std::is_same<int8_t, T>::value;
1082 if (auto ShiftedImm = getShiftedVal<8>())
1083 if (!(IsByte && ShiftedImm->second) &&
1084 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1085 << ShiftedImm->second))
1086 return DiagnosticPredicateTy::Match;
1087
1088 return DiagnosticPredicateTy::NearMatch;
1089 }
1090
1091 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1092 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1093 return DiagnosticPredicateTy::Match;
1094 return DiagnosticPredicateTy::NoMatch;
1095 }
1096
1097 bool isCondCode() const { return Kind == k_CondCode; }
1098
1099 bool isSIMDImmType10() const {
1100 if (!isImm())
1101 return false;
1102 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1103 if (!MCE)
1104 return false;
1105 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1106 }
1107
1108 template<int N>
1109 bool isBranchTarget() const {
1110 if (!isImm())
1111 return false;
1112 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1113 if (!MCE)
1114 return true;
1115 int64_t Val = MCE->getValue();
1116 if (Val & 0x3)
1117 return false;
1118 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1119 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1120 }
1121
1122 bool
1123 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1124 if (!isImm())
1125 return false;
1126
1127 AArch64MCExpr::VariantKind ELFRefKind;
1128 MCSymbolRefExpr::VariantKind DarwinRefKind;
1129 int64_t Addend;
1130 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1131 DarwinRefKind, Addend)) {
1132 return false;
1133 }
1134 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1135 return false;
1136
1137 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1138 }
1139
1140 bool isMovWSymbolG3() const {
1141 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1142 }
1143
1144 bool isMovWSymbolG2() const {
1145 return isMovWSymbol(
1146 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1147 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1148 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1149 AArch64MCExpr::VK_DTPREL_G2});
1150 }
1151
1152 bool isMovWSymbolG1() const {
1153 return isMovWSymbol(
1154 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1155 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1156 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1157 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1158 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1159 }
1160
1161 bool isMovWSymbolG0() const {
1162 return isMovWSymbol(
1163 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1164 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1165 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1166 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1167 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1168 }
1169
1170 template<int RegWidth, int Shift>
1171 bool isMOVZMovAlias() const {
1172 if (!isImm()) return false;
1173
1174 const MCExpr *E = getImm();
1175 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1176 uint64_t Value = CE->getValue();
1177
1178 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1179 }
1180 // Only supports the case of Shift being 0 if an expression is used as an
1181 // operand
1182 return !Shift && E;
1183 }
1184
1185 template<int RegWidth, int Shift>
1186 bool isMOVNMovAlias() const {
1187 if (!isImm()) return false;
1188
1189 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1190 if (!CE) return false;
1191 uint64_t Value = CE->getValue();
1192
1193 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1194 }
1195
1196 bool isFPImm() const {
1197 return Kind == k_FPImm &&
1198 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1199 }
1200
1201 bool isBarrier() const {
1202 return Kind == k_Barrier && !getBarriernXSModifier();
1203 }
1204 bool isBarriernXS() const {
1205 return Kind == k_Barrier && getBarriernXSModifier();
1206 }
1207 bool isSysReg() const { return Kind == k_SysReg; }
1208
1209 bool isMRSSystemRegister() const {
1210 if (!isSysReg()) return false;
1211
1212 return SysReg.MRSReg != -1U;
1213 }
1214
1215 bool isMSRSystemRegister() const {
1216 if (!isSysReg()) return false;
1217 return SysReg.MSRReg != -1U;
1218 }
1219
1220 bool isSystemPStateFieldWithImm0_1() const {
1221 if (!isSysReg()) return false;
1222 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1223 }
1224
1225 bool isSystemPStateFieldWithImm0_15() const {
1226 if (!isSysReg())
1227 return false;
1228 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1229 }
1230
1231 bool isSVCR() const {
1232 if (Kind != k_SVCR)
1233 return false;
1234 return SVCR.PStateField != -1U;
1235 }
1236
1237 bool isReg() const override {
1238 return Kind == k_Register;
1239 }
1240
1241 bool isVectorList() const { return Kind == k_VectorList; }
1242
1243 bool isScalarReg() const {
1244 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1245 }
1246
1247 bool isNeonVectorReg() const {
1248 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1249 }
1250
1251 bool isNeonVectorRegLo() const {
1252 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1253 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1254 Reg.RegNum) ||
1255 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1256 Reg.RegNum));
1257 }
1258
1259 bool isNeonVectorReg0to7() const {
1260 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1261 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1262 Reg.RegNum));
1263 }
1264
1265 bool isMatrix() const { return Kind == k_MatrixRegister; }
1266 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1267
1268 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1269 RegKind RK;
1270 switch (Class) {
1271 case AArch64::PPRRegClassID:
1272 case AArch64::PPR_3bRegClassID:
1273 case AArch64::PPR_p8to15RegClassID:
1274 case AArch64::PNRRegClassID:
1275 case AArch64::PNR_p8to15RegClassID:
1276 case AArch64::PPRorPNRRegClassID:
1277 RK = RegKind::SVEPredicateAsCounter;
1278 break;
1279 default:
1280 llvm_unreachable("Unsupported register class");
1281 }
1282
1283 return (Kind == k_Register && Reg.Kind == RK) &&
1284 AArch64MCRegisterClasses[Class].contains(getReg());
1285 }
1286
1287 template <unsigned Class> bool isSVEVectorReg() const {
1288 RegKind RK;
1289 switch (Class) {
1290 case AArch64::ZPRRegClassID:
1291 case AArch64::ZPR_3bRegClassID:
1292 case AArch64::ZPR_4bRegClassID:
1293 case AArch64::ZPRMul2_LoRegClassID:
1294 case AArch64::ZPRMul2_HiRegClassID:
1295 case AArch64::ZPR_KRegClassID:
1296 RK = RegKind::SVEDataVector;
1297 break;
1298 case AArch64::PPRRegClassID:
1299 case AArch64::PPR_3bRegClassID:
1300 case AArch64::PPR_p8to15RegClassID:
1301 case AArch64::PNRRegClassID:
1302 case AArch64::PNR_p8to15RegClassID:
1303 case AArch64::PPRorPNRRegClassID:
1304 RK = RegKind::SVEPredicateVector;
1305 break;
1306 default:
1307 llvm_unreachable("Unsupported register class");
1308 }
1309
1310 return (Kind == k_Register && Reg.Kind == RK) &&
1311 AArch64MCRegisterClasses[Class].contains(getReg());
1312 }
1313
1314 template <unsigned Class> bool isFPRasZPR() const {
1315 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1316 AArch64MCRegisterClasses[Class].contains(getReg());
1317 }
1318
1319 template <int ElementWidth, unsigned Class>
1320 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1321 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1322 return DiagnosticPredicateTy::NoMatch;
1323
1324 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1325 return DiagnosticPredicateTy::Match;
1326
1327 return DiagnosticPredicateTy::NearMatch;
1328 }
1329
1330 template <int ElementWidth, unsigned Class>
1331 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1332 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1333 Reg.Kind != RegKind::SVEPredicateVector))
1334 return DiagnosticPredicateTy::NoMatch;
1335
1336 if ((isSVEPredicateAsCounterReg<Class>() ||
1337 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1338 Reg.ElementWidth == ElementWidth)
1339 return DiagnosticPredicateTy::Match;
1340
1341 return DiagnosticPredicateTy::NearMatch;
1342 }
1343
1344 template <int ElementWidth, unsigned Class>
1345 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1346 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1347 return DiagnosticPredicateTy::NoMatch;
1348
1349 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1350 return DiagnosticPredicateTy::Match;
1351
1352 return DiagnosticPredicateTy::NearMatch;
1353 }
1354
1355 template <int ElementWidth, unsigned Class>
1356 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1357 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1358 return DiagnosticPredicateTy::NoMatch;
1359
1360 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1361 return DiagnosticPredicateTy::Match;
1362
1363 return DiagnosticPredicateTy::NearMatch;
1364 }
1365
1366 template <int ElementWidth, unsigned Class,
1367 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1368 bool ShiftWidthAlwaysSame>
1369 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1370 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1371 if (!VectorMatch.isMatch())
1372 return DiagnosticPredicateTy::NoMatch;
1373
1374 // Give a more specific diagnostic when the user has explicitly typed in
1375 // a shift-amount that does not match what is expected, but for which
1376 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1377 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1378 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1379 ShiftExtendTy == AArch64_AM::SXTW) &&
1380 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1381 return DiagnosticPredicateTy::NoMatch;
1382
1383 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1384 return DiagnosticPredicateTy::Match;
1385
1386 return DiagnosticPredicateTy::NearMatch;
1387 }
1388
1389 bool isGPR32as64() const {
1390 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1391 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1392 }
1393
1394 bool isGPR64as32() const {
1395 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1396 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1397 }
1398
1399 bool isGPR64x8() const {
1400 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1401 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1402 Reg.RegNum);
1403 }
1404
1405 bool isWSeqPair() const {
1406 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1407 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1408 Reg.RegNum);
1409 }
1410
1411 bool isXSeqPair() const {
1412 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1413 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1414 Reg.RegNum);
1415 }
1416
1417 bool isSyspXzrPair() const {
1418 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1419 }
1420
1421 template<int64_t Angle, int64_t Remainder>
1422 DiagnosticPredicate isComplexRotation() const {
1423 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1424
1425 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1426 if (!CE) return DiagnosticPredicateTy::NoMatch;
1427 uint64_t Value = CE->getValue();
1428
1429 if (Value % Angle == Remainder && Value <= 270)
1430 return DiagnosticPredicateTy::Match;
1431 return DiagnosticPredicateTy::NearMatch;
1432 }
1433
1434 template <unsigned RegClassID> bool isGPR64() const {
1435 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1436 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1437 }
1438
1439 template <unsigned RegClassID, int ExtWidth>
1440 DiagnosticPredicate isGPR64WithShiftExtend() const {
1441 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1442 return DiagnosticPredicateTy::NoMatch;
1443
1444 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1445 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1446 return DiagnosticPredicateTy::Match;
1447 return DiagnosticPredicateTy::NearMatch;
1448 }
1449
1450 /// Is this a vector list with the type implicit (presumably attached to the
1451 /// instruction itself)?
1452 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1453 bool isImplicitlyTypedVectorList() const {
1454 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1455 VectorList.NumElements == 0 &&
1456 VectorList.RegisterKind == VectorKind &&
1457 (!IsConsecutive || (VectorList.Stride == 1));
1458 }
1459
1460 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1461 unsigned ElementWidth, unsigned Stride = 1>
1462 bool isTypedVectorList() const {
1463 if (Kind != k_VectorList)
1464 return false;
1465 if (VectorList.Count != NumRegs)
1466 return false;
1467 if (VectorList.RegisterKind != VectorKind)
1468 return false;
1469 if (VectorList.ElementWidth != ElementWidth)
1470 return false;
1471 if (VectorList.Stride != Stride)
1472 return false;
1473 return VectorList.NumElements == NumElements;
1474 }
1475
1476 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1477 unsigned ElementWidth, unsigned RegClass>
1478 DiagnosticPredicate isTypedVectorListMultiple() const {
1479 bool Res =
1480 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1481 if (!Res)
1482 return DiagnosticPredicateTy::NoMatch;
1483 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
1484 return DiagnosticPredicateTy::NearMatch;
1485 return DiagnosticPredicateTy::Match;
1486 }
1487
1488 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1489 unsigned ElementWidth>
1490 DiagnosticPredicate isTypedVectorListStrided() const {
1491 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1492 ElementWidth, Stride>();
1493 if (!Res)
1494 return DiagnosticPredicateTy::NoMatch;
1495 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1496 ((VectorList.RegNum >= AArch64::Z16) &&
1497 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1498 return DiagnosticPredicateTy::Match;
1499 return DiagnosticPredicateTy::NoMatch;
1500 }
1501
1502 template <int Min, int Max>
1503 DiagnosticPredicate isVectorIndex() const {
1504 if (Kind != k_VectorIndex)
1505 return DiagnosticPredicateTy::NoMatch;
1506 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1507 return DiagnosticPredicateTy::Match;
1508 return DiagnosticPredicateTy::NearMatch;
1509 }
1510
1511 bool isToken() const override { return Kind == k_Token; }
1512
1513 bool isTokenEqual(StringRef Str) const {
1514 return Kind == k_Token && getToken() == Str;
1515 }
1516 bool isSysCR() const { return Kind == k_SysCR; }
1517 bool isPrefetch() const { return Kind == k_Prefetch; }
1518 bool isPSBHint() const { return Kind == k_PSBHint; }
1519 bool isPHint() const { return Kind == k_PHint; }
1520 bool isBTIHint() const { return Kind == k_BTIHint; }
1521 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1522 bool isShifter() const {
1523 if (!isShiftExtend())
1524 return false;
1525
1526 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1527 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1528 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1529 ST == AArch64_AM::MSL);
1530 }
1531
1532 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1533 if (Kind != k_FPImm)
1534 return DiagnosticPredicateTy::NoMatch;
1535
1536 if (getFPImmIsExact()) {
1537 // Lookup the immediate from table of supported immediates.
1538 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1539 assert(Desc && "Unknown enum value");
1540
1541 // Calculate its FP value.
1542 APFloat RealVal(APFloat::IEEEdouble());
1543 auto StatusOrErr =
1544 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1545 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1546 llvm_unreachable("FP immediate is not exact");
1547
1548 if (getFPImm().bitwiseIsEqual(RealVal))
1549 return DiagnosticPredicateTy::Match;
1550 }
1551
1552 return DiagnosticPredicateTy::NearMatch;
1553 }
1554
1555 template <unsigned ImmA, unsigned ImmB>
1556 DiagnosticPredicate isExactFPImm() const {
1557 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1558 if ((Res = isExactFPImm<ImmA>()))
1559 return DiagnosticPredicateTy::Match;
1560 if ((Res = isExactFPImm<ImmB>()))
1561 return DiagnosticPredicateTy::Match;
1562 return Res;
1563 }
1564
1565 bool isExtend() const {
1566 if (!isShiftExtend())
1567 return false;
1568
1569 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1570 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1571 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1572 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1573 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1574 ET == AArch64_AM::LSL) &&
1575 getShiftExtendAmount() <= 4;
1576 }
1577
1578 bool isExtend64() const {
1579 if (!isExtend())
1580 return false;
1581 // Make sure the extend expects a 32-bit source register.
1582 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1583 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1584 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1585 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1586 }
1587
1588 bool isExtendLSL64() const {
1589 if (!isExtend())
1590 return false;
1591 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1592 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1593 ET == AArch64_AM::LSL) &&
1594 getShiftExtendAmount() <= 4;
1595 }
1596
1597 bool isLSLImm3Shift() const {
1598 if (!isShiftExtend())
1599 return false;
1600 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1601 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1602 }
1603
1604 template<int Width> bool isMemXExtend() const {
1605 if (!isExtend())
1606 return false;
1607 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1608 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1609 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1610 getShiftExtendAmount() == 0);
1611 }
1612
1613 template<int Width> bool isMemWExtend() const {
1614 if (!isExtend())
1615 return false;
1616 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1617 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1618 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1619 getShiftExtendAmount() == 0);
1620 }
1621
1622 template <unsigned width>
1623 bool isArithmeticShifter() const {
1624 if (!isShifter())
1625 return false;
1626
1627 // An arithmetic shifter is LSL, LSR, or ASR.
1628 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1629 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1630 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1631 }
1632
1633 template <unsigned width>
1634 bool isLogicalShifter() const {
1635 if (!isShifter())
1636 return false;
1637
1638 // A logical shifter is LSL, LSR, ASR or ROR.
1639 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1640 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1641 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1642 getShiftExtendAmount() < width;
1643 }
1644
1645 bool isMovImm32Shifter() const {
1646 if (!isShifter())
1647 return false;
1648
1649 // A 32-bit MOVi shifter is LSL of 0 or 16.
1650 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1651 if (ST != AArch64_AM::LSL)
1652 return false;
1653 uint64_t Val = getShiftExtendAmount();
1654 return (Val == 0 || Val == 16);
1655 }
1656
1657 bool isMovImm64Shifter() const {
1658 if (!isShifter())
1659 return false;
1660
1661 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1662 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1663 if (ST != AArch64_AM::LSL)
1664 return false;
1665 uint64_t Val = getShiftExtendAmount();
1666 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1667 }
1668
1669 bool isLogicalVecShifter() const {
1670 if (!isShifter())
1671 return false;
1672
1673 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1674 unsigned Shift = getShiftExtendAmount();
1675 return getShiftExtendType() == AArch64_AM::LSL &&
1676 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1677 }
1678
1679 bool isLogicalVecHalfWordShifter() const {
1680 if (!isLogicalVecShifter())
1681 return false;
1682
1683 // A logical vector half-word shifter is a left shift by 0 or 8.
1684 unsigned Shift = getShiftExtendAmount();
1685 return getShiftExtendType() == AArch64_AM::LSL &&
1686 (Shift == 0 || Shift == 8);
1687 }
1688
1689 bool isMoveVecShifter() const {
1690 if (!isShiftExtend())
1691 return false;
1692
1693 // A move vector shifter is an MSL shift of 8 or 16.
1694 unsigned Shift = getShiftExtendAmount();
1695 return getShiftExtendType() == AArch64_AM::MSL &&
1696 (Shift == 8 || Shift == 16);
1697 }
1698
1699 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1700 // to LDUR/STUR when the offset is not legal for the former but is for
1701 // the latter. As such, in addition to checking for being a legal unscaled
1702 // address, also check that it is not a legal scaled address. This avoids
1703 // ambiguity in the matcher.
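// For example, "ldr x0, [x1, #-8]" only matches the unscaled (LDUR) form,
// since -8 is not a valid scaled unsigned 12-bit offset.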
1704 template<int Width>
1705 bool isSImm9OffsetFB() const {
1706 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1707 }
1708
1709 bool isAdrpLabel() const {
1710 // Validation was handled during parsing, so we just verify that
1711 // something didn't go haywire.
1712 if (!isImm())
1713 return false;
1714
1715 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1716 int64_t Val = CE->getValue();
1717 int64_t Min = - (4096 * (1LL << (21 - 1)));
1718 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1719 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1720 }
1721
1722 return true;
1723 }
1724
1725 bool isAdrLabel() const {
1726 // Validation was handled during parsing, so we just verify that
1727 // something didn't go haywire.
1728 if (!isImm())
1729 return false;
1730
1731 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1732 int64_t Val = CE->getValue();
1733 int64_t Min = - (1LL << (21 - 1));
1734 int64_t Max = ((1LL << (21 - 1)) - 1);
1735 return Val >= Min && Val <= Max;
1736 }
1737
1738 return true;
1739 }
1740
1741 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1742 DiagnosticPredicate isMatrixRegOperand() const {
1743 if (!isMatrix())
1744 return DiagnosticPredicateTy::NoMatch;
1745 if (getMatrixKind() != Kind ||
1746 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1747 EltSize != getMatrixElementWidth())
1748 return DiagnosticPredicateTy::NearMatch;
1749 return DiagnosticPredicateTy::Match;
1750 }
1751
1752 bool isPAuthPCRelLabel16Operand() const {
1753 // PAuth PCRel16 operands are similar to regular branch targets, but only
1754 // negative values are allowed for concrete immediates, as the signing
1755 // instruction is expected to be at a lower address.
1756 if (!isImm())
1757 return false;
1758 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1759 if (!MCE)
1760 return true;
1761 int64_t Val = MCE->getValue();
1762 if (Val & 0b11)
1763 return false;
1764 return (Val <= 0) && (Val > -(1 << 18));
1765 }
1766
1767 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1768 // Add as immediates when possible. Null MCExpr = 0.
1769 if (!Expr)
1771 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1772 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1773 else
1775 }
1776
1777 void addRegOperands(MCInst &Inst, unsigned N) const {
1778 assert(N == 1 && "Invalid number of operands!");
1779 Inst.addOperand(MCOperand::createReg(getReg()));
1780 }
1781
1782 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1783 assert(N == 1 && "Invalid number of operands!");
1784 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1785 }
1786
1787 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1788 assert(N == 1 && "Invalid number of operands!");
1789 assert(
1790 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1791
1792 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1793 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1794 RI->getEncodingValue(getReg()));
1795
1795
1796 Inst.addOperand(MCOperand::createReg(Reg));
1797 }
1798
1799 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1800 assert(N == 1 && "Invalid number of operands!");
1801 assert(
1802 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1803
1804 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1805 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1806 RI->getEncodingValue(getReg()));
1807
1807
1808 Inst.addOperand(MCOperand::createReg(Reg));
1809 }
1810
1811 template <int Width>
1812 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1813 unsigned Base;
1814 switch (Width) {
1815 case 8: Base = AArch64::B0; break;
1816 case 16: Base = AArch64::H0; break;
1817 case 32: Base = AArch64::S0; break;
1818 case 64: Base = AArch64::D0; break;
1819 case 128: Base = AArch64::Q0; break;
1820 default:
1821 llvm_unreachable("Unsupported width");
1822 }
1823 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1824 }
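 // Example (illustrative): with Width == 64, a parsed 'd5' is emitted as the
 // SVE register Z5 (Z0 + (D5 - D0)); the same numbering rule applies to the
 // other scalar FP widths.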
1825
1826 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1827 assert(N == 1 && "Invalid number of operands!");
1828 unsigned Reg = getReg();
1829 // Normalise to PPR
1830 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1831 Reg = Reg - AArch64::PN0 + AArch64::P0;
1832 Inst.addOperand(MCOperand::createReg(Reg));
1833 }
1834
1835 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1836 assert(N == 1 && "Invalid number of operands!");
1837 Inst.addOperand(
1838 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1839 }
1840
1841 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1842 assert(N == 1 && "Invalid number of operands!");
1843 assert(
1844 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1845 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1846 }
1847
1848 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1849 assert(N == 1 && "Invalid number of operands!");
1850 assert(
1851 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1852 Inst.addOperand(MCOperand::createReg(getReg()));
1853 }
1854
1855 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1856 assert(N == 1 && "Invalid number of operands!");
1857 Inst.addOperand(MCOperand::createReg(getReg()));
1858 }
1859
1860 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1861 assert(N == 1 && "Invalid number of operands!");
1862 Inst.addOperand(MCOperand::createReg(getReg()));
1863 }
1864
1865 enum VecListIndexType {
1866 VecListIdx_DReg = 0,
1867 VecListIdx_QReg = 1,
1868 VecListIdx_ZReg = 2,
1869 VecListIdx_PReg = 3,
1870 };
1871
1872 template <VecListIndexType RegTy, unsigned NumRegs,
1873 bool IsConsecutive = false>
1874 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!");
1876 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1877 "Expected consecutive registers");
1878 static const unsigned FirstRegs[][5] = {
1879 /* DReg */ { AArch64::Q0,
1880 AArch64::D0, AArch64::D0_D1,
1881 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1882 /* QReg */ { AArch64::Q0,
1883 AArch64::Q0, AArch64::Q0_Q1,
1884 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1885 /* ZReg */ { AArch64::Z0,
1886 AArch64::Z0, AArch64::Z0_Z1,
1887 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1888 /* PReg */ { AArch64::P0,
1889 AArch64::P0, AArch64::P0_P1 }
1890 };
1891
1892 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1893 " NumRegs must be <= 4 for ZRegs");
1894
1895 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1896 " NumRegs must be <= 2 for PRegs");
1897
1898 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1899 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1900 FirstRegs[(unsigned)RegTy][0]));
1901 }
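 // Example (illustrative): for a two-register D-form list starting at v2
 // (stored as Q2), FirstReg is D0_D1 and the emitted tuple register is
 // D0_D1 + (Q2 - Q0) = D2_D3.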
1902
1903 template <unsigned NumRegs>
1904 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1905 assert(N == 1 && "Invalid number of operands!");
1906 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1907
1908 switch (NumRegs) {
1909 case 2:
1910 if (getVectorListStart() < AArch64::Z16) {
1911 assert((getVectorListStart() < AArch64::Z8) &&
1912 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1913 Inst.addOperand(MCOperand::createReg(
1914 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1915 } else {
1916 assert((getVectorListStart() < AArch64::Z24) &&
1917 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1918 Inst.addOperand(MCOperand::createReg(
1919 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1920 }
1921 break;
1922 case 4:
1923 if (getVectorListStart() < AArch64::Z16) {
1924 assert((getVectorListStart() < AArch64::Z4) &&
1925 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1926 Inst.addOperand(MCOperand::createReg(
1927 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1928 } else {
1929 assert((getVectorListStart() < AArch64::Z20) &&
1930 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1931 Inst.addOperand(MCOperand::createReg(
1932 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1933 }
1934 break;
1935 default:
1936 llvm_unreachable("Unsupported number of registers for strided vec list");
1937 }
1938 }
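 // Example (illustrative): for NumRegs == 2, a strided list starting at Z3
 // is emitted as the tuple Z3_Z11 (Z0_Z8 + 3), and one starting at Z19 as
 // Z19_Z27 (Z16_Z24 + 3).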
1939
1940 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1941 assert(N == 1 && "Invalid number of operands!");
1942 unsigned RegMask = getMatrixTileListRegMask();
1943 assert(RegMask <= 0xFF && "Invalid mask!");
1944 Inst.addOperand(MCOperand::createImm(RegMask));
1945 }
1946
1947 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1948 assert(N == 1 && "Invalid number of operands!");
1949 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1950 }
1951
1952 template <unsigned ImmIs0, unsigned ImmIs1>
1953 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1954 assert(N == 1 && "Invalid number of operands!");
1955 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1956 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1957 }
1958
1959 void addImmOperands(MCInst &Inst, unsigned N) const {
1960 assert(N == 1 && "Invalid number of operands!");
1961 // If this is a pageoff symrefexpr with an addend, adjust the addend
1962 // to be only the page-offset portion. Otherwise, just add the expr
1963 // as-is.
1964 addExpr(Inst, getImm());
1965 }
1966
1967 template <int Shift>
1968 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1969 assert(N == 2 && "Invalid number of operands!");
1970 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1971 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1972 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1973 } else if (isShiftedImm()) {
1974 addExpr(Inst, getShiftedImmVal());
1975 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1976 } else {
1977 addExpr(Inst, getImm());
1978 Inst.addOperand(MCOperand::createImm(0));
1979 }
1980 }
1981
1982 template <int Shift>
1983 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1984 assert(N == 2 && "Invalid number of operands!");
1985 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1986 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1987 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1988 } else
1989 llvm_unreachable("Not a shifted negative immediate");
1990 }
1991
1992 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1993 assert(N == 1 && "Invalid number of operands!");
1994 Inst.addOperand(MCOperand::createImm(getCondCode()));
1995 }
1996
1997 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1998 assert(N == 1 && "Invalid number of operands!");
1999 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2000 if (!MCE)
2001 addExpr(Inst, getImm());
2002 else
2003 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2004 }
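 // Example (illustrative): a constant operand of 0x10000 is emitted above as
 // the page index 0x10 (value >> 12); non-constant expressions are passed
 // through unchanged for the fixup/relocation to resolve.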
2005
2006 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2007 addImmOperands(Inst, N);
2008 }
2009
2010 template<int Scale>
2011 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2012 assert(N == 1 && "Invalid number of operands!");
2013 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2014
2015 if (!MCE) {
2016 Inst.addOperand(MCOperand::createExpr(getImm()));
2017 return;
2018 }
2019 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2020 }
2021
2022 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2023 assert(N == 1 && "Invalid number of operands!");
2024 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2025 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2026 }
2027
2028 template <int Scale>
2029 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2030 assert(N == 1 && "Invalid number of operands!");
2031 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2032 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2033 }
2034
2035 template <int Scale>
2036 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2039 }
2040
2041 template <typename T>
2042 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2043 assert(N == 1 && "Invalid number of operands!");
2044 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2045 std::make_unsigned_t<T> Val = MCE->getValue();
2046 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2047 Inst.addOperand(MCOperand::createImm(encoding));
2048 }
2049
2050 template <typename T>
2051 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2052 assert(N == 1 && "Invalid number of operands!");
2053 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2054 std::make_unsigned_t<T> Val = ~MCE->getValue();
2055 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2056 Inst.addOperand(MCOperand::createImm(encoding));
2057 }
2058
2059 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2060 assert(N == 1 && "Invalid number of operands!");
2061 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2062 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2063 Inst.addOperand(MCOperand::createImm(encoding));
2064 }
2065
2066 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2067 // Branch operands don't encode the low bits, so shift them off
2068 // here. If it's a label, however, just put it on directly as there's
2069 // not enough information now to do anything.
2070 assert(N == 1 && "Invalid number of operands!");
2071 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2072 if (!MCE) {
2073 addExpr(Inst, getImm());
2074 return;
2075 }
2076 assert(MCE && "Invalid constant immediate operand!");
2077 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2078 }
2079
2080 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2081 // PC-relative operands don't encode the low bits, so shift them off
2082 // here. If it's a label, however, just put it on directly as there's
2083 // not enough information now to do anything.
2084 assert(N == 1 && "Invalid number of operands!");
2085 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2086 if (!MCE) {
2087 addExpr(Inst, getImm());
2088 return;
2089 }
2090 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2091 }
2092
2093 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2094 // Branch operands don't encode the low bits, so shift them off
2095 // here. If it's a label, however, just put it on directly as there's
2096 // not enough information now to do anything.
2097 assert(N == 1 && "Invalid number of operands!");
2098 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2099 if (!MCE) {
2100 addExpr(Inst, getImm());
2101 return;
2102 }
2103 assert(MCE && "Invalid constant immediate operand!");
2104 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2105 }
2106
2107 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2108 // Branch operands don't encode the low bits, so shift them off
2109 // here. If it's a label, however, just put it on directly as there's
2110 // not enough information now to do anything.
2111 assert(N == 1 && "Invalid number of operands!");
2112 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2113 if (!MCE) {
2114 addExpr(Inst, getImm());
2115 return;
2116 }
2117 assert(MCE && "Invalid constant immediate operand!");
2118 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2119 }
2120
2121 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2122 // Branch operands don't encode the low bits, so shift them off
2123 // here. If it's a label, however, just put it on directly as there's
2124 // not enough information now to do anything.
2125 assert(N == 1 && "Invalid number of operands!");
2126 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2127 if (!MCE) {
2128 addExpr(Inst, getImm());
2129 return;
2130 }
2131 assert(MCE && "Invalid constant immediate operand!");
2132 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2133 }
2134
2135 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2136 assert(N == 1 && "Invalid number of operands!");
2137 Inst.addOperand(MCOperand::createImm(
2138 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2139 }
2140
2141 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2142 assert(N == 1 && "Invalid number of operands!");
2143 Inst.addOperand(MCOperand::createImm(getBarrier()));
2144 }
2145
2146 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2147 assert(N == 1 && "Invalid number of operands!");
2148 Inst.addOperand(MCOperand::createImm(getBarrier()));
2149 }
2150
2151 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2152 assert(N == 1 && "Invalid number of operands!");
2153
2154 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2155 }
2156
2157 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2158 assert(N == 1 && "Invalid number of operands!");
2159
2160 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2161 }
2162
2163 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2164 assert(N == 1 && "Invalid number of operands!");
2165
2166 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2167 }
2168
2169 void addSVCROperands(MCInst &Inst, unsigned N) const {
2170 assert(N == 1 && "Invalid number of operands!");
2171
2172 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2173 }
2174
2175 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2176 assert(N == 1 && "Invalid number of operands!");
2177
2178 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2179 }
2180
2181 void addSysCROperands(MCInst &Inst, unsigned N) const {
2182 assert(N == 1 && "Invalid number of operands!");
2183 Inst.addOperand(MCOperand::createImm(getSysCR()));
2184 }
2185
2186 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2187 assert(N == 1 && "Invalid number of operands!");
2188 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2189 }
2190
2191 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2192 assert(N == 1 && "Invalid number of operands!");
2193 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2194 }
2195
2196 void addPHintOperands(MCInst &Inst, unsigned N) const {
2197 assert(N == 1 && "Invalid number of operands!");
2198 Inst.addOperand(MCOperand::createImm(getPHint()));
2199 }
2200
2201 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2202 assert(N == 1 && "Invalid number of operands!");
2203 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2204 }
2205
2206 void addShifterOperands(MCInst &Inst, unsigned N) const {
2207 assert(N == 1 && "Invalid number of operands!");
2208 unsigned Imm =
2209 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2210 Inst.addOperand(MCOperand::createImm(Imm));
2211 }
2212
2213 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2214 assert(N == 1 && "Invalid number of operands!");
2215 unsigned Imm = getShiftExtendAmount();
2216 Inst.addOperand(MCOperand::createImm(Imm));
2217 }
2218
2219 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2220 assert(N == 1 && "Invalid number of operands!");
2221
2222 if (!isScalarReg())
2223 return;
2224
2225 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2226 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2227 .getRegister(RI->getEncodingValue(getReg()));
2228 if (Reg != AArch64::XZR)
2229 llvm_unreachable("wrong register");
2230
2231 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2232 }
2233
2234 void addExtendOperands(MCInst &Inst, unsigned N) const {
2235 assert(N == 1 && "Invalid number of operands!");
2236 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2237 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2238 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2239 Inst.addOperand(MCOperand::createImm(Imm));
2240 }
2241
2242 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2243 assert(N == 1 && "Invalid number of operands!");
2244 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2245 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2246 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2247 Inst.addOperand(MCOperand::createImm(Imm));
2248 }
2249
2250 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2251 assert(N == 2 && "Invalid number of operands!");
2252 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2253 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2254 Inst.addOperand(MCOperand::createImm(IsSigned));
2255 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2256 }
2257
2258 // For 8-bit load/store instructions with a register offset, both the
2259 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2260 // they're disambiguated by whether the shift was explicit or implicit rather
2261 // than its size.
2262 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2263 assert(N == 2 && "Invalid number of operands!");
2264 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2265 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2266 Inst.addOperand(MCOperand::createImm(IsSigned));
2267 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2268 }
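 // Example (illustrative): 'ldrb w0, [x1, x2, lsl #0]' selects the "DoShift"
 // variant because the shift is written explicitly, while 'ldrb w0, [x1, x2]'
 // selects "NoShift"; both shift by zero.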
2269
2270 template<int Shift>
2271 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2272 assert(N == 1 && "Invalid number of operands!");
2273
2274 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2275 if (CE) {
2276 uint64_t Value = CE->getValue();
2277 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2278 } else {
2279 addExpr(Inst, getImm());
2280 }
2281 }
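 // Example (illustrative): with Shift == 16, a constant such as 0x12340000
 // contributes the 16-bit chunk (Value >> 16) & 0xffff = 0x1234 to the MOVZ
 // alias being built.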
2282
2283 template<int Shift>
2284 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2285 assert(N == 1 && "Invalid number of operands!");
2286
2287 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2288 uint64_t Value = CE->getValue();
2289 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2290 }
2291
2292 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2293 assert(N == 1 && "Invalid number of operands!");
2294 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2295 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2296 }
2297
2298 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2299 assert(N == 1 && "Invalid number of operands!");
2300 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2301 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2302 }
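 // Example (illustrative): the even-rotation operand encodes #0/#90/#180/#270
 // as value / 90 (0..3), and the odd-rotation operand encodes #90/#270 as
 // (value - 90) / 180 (0 or 1).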
2303
2304 void print(raw_ostream &OS) const override;
2305
2306 static std::unique_ptr<AArch64Operand>
2307 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2308 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2309 Op->Tok.Data = Str.data();
2310 Op->Tok.Length = Str.size();
2311 Op->Tok.IsSuffix = IsSuffix;
2312 Op->StartLoc = S;
2313 Op->EndLoc = S;
2314 return Op;
2315 }
2316
2317 static std::unique_ptr<AArch64Operand>
2318 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2319 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2320 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2321 unsigned ShiftAmount = 0,
2322 unsigned HasExplicitAmount = false) {
2323 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2324 Op->Reg.RegNum = RegNum;
2325 Op->Reg.Kind = Kind;
2326 Op->Reg.ElementWidth = 0;
2327 Op->Reg.EqualityTy = EqTy;
2328 Op->Reg.ShiftExtend.Type = ExtTy;
2329 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2330 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2331 Op->StartLoc = S;
2332 Op->EndLoc = E;
2333 return Op;
2334 }
2335
2336 static std::unique_ptr<AArch64Operand>
2337 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2338 SMLoc S, SMLoc E, MCContext &Ctx,
2339 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2340 unsigned ShiftAmount = 0,
2341 unsigned HasExplicitAmount = false) {
2342 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2343 Kind == RegKind::SVEPredicateVector ||
2344 Kind == RegKind::SVEPredicateAsCounter) &&
2345 "Invalid vector kind");
2346 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2347 HasExplicitAmount);
2348 Op->Reg.ElementWidth = ElementWidth;
2349 return Op;
2350 }
2351
2352 static std::unique_ptr<AArch64Operand>
2353 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2354 unsigned NumElements, unsigned ElementWidth,
2355 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2356 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2357 Op->VectorList.RegNum = RegNum;
2358 Op->VectorList.Count = Count;
2359 Op->VectorList.Stride = Stride;
2360 Op->VectorList.NumElements = NumElements;
2361 Op->VectorList.ElementWidth = ElementWidth;
2362 Op->VectorList.RegisterKind = RegisterKind;
2363 Op->StartLoc = S;
2364 Op->EndLoc = E;
2365 return Op;
2366 }
2367
2368 static std::unique_ptr<AArch64Operand>
2369 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2370 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2371 Op->VectorIndex.Val = Idx;
2372 Op->StartLoc = S;
2373 Op->EndLoc = E;
2374 return Op;
2375 }
2376
2377 static std::unique_ptr<AArch64Operand>
2378 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2379 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2380 Op->MatrixTileList.RegMask = RegMask;
2381 Op->StartLoc = S;
2382 Op->EndLoc = E;
2383 return Op;
2384 }
2385
2386 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2387 const unsigned ElementWidth) {
2388 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2389 RegMap = {
2390 {{0, AArch64::ZAB0},
2391 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2392 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2393 {{8, AArch64::ZAB0},
2394 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2395 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2396 {{16, AArch64::ZAH0},
2397 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2398 {{16, AArch64::ZAH1},
2399 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2400 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2401 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2402 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2403 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2404 };
2405
2406 if (ElementWidth == 64)
2407 OutRegs.insert(Reg);
2408 else {
2409 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2410 assert(!Regs.empty() && "Invalid tile or element width!");
2411 for (auto OutReg : Regs)
2412 OutRegs.insert(OutReg);
2413 }
2414 }
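 // Example (illustrative): per the RegMap above, a 32-bit tile such as ZAS1
 // expands to the 64-bit tiles {ZAD1, ZAD5}, while a 64-bit tile is inserted
 // as-is.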
2415
2416 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2417 SMLoc E, MCContext &Ctx) {
2418 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2419 Op->Imm.Val = Val;
2420 Op->StartLoc = S;
2421 Op->EndLoc = E;
2422 return Op;
2423 }
2424
2425 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2426 unsigned ShiftAmount,
2427 SMLoc S, SMLoc E,
2428 MCContext &Ctx) {
2429 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2430 Op->ShiftedImm.Val = Val;
2431 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2432 Op->StartLoc = S;
2433 Op->EndLoc = E;
2434 return Op;
2435 }
2436
2437 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2438 unsigned Last, SMLoc S,
2439 SMLoc E,
2440 MCContext &Ctx) {
2441 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2442 Op->ImmRange.First = First;
2443 Op->ImmRange.Last = Last;
2444 Op->EndLoc = E;
2445 return Op;
2446 }
2447
2448 static std::unique_ptr<AArch64Operand>
2449 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2450 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2451 Op->CondCode.Code = Code;
2452 Op->StartLoc = S;
2453 Op->EndLoc = E;
2454 return Op;
2455 }
2456
2457 static std::unique_ptr<AArch64Operand>
2458 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2459 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2460 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2461 Op->FPImm.IsExact = IsExact;
2462 Op->StartLoc = S;
2463 Op->EndLoc = S;
2464 return Op;
2465 }
2466
2467 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2468 StringRef Str,
2469 SMLoc S,
2470 MCContext &Ctx,
2471 bool HasnXSModifier) {
2472 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2473 Op->Barrier.Val = Val;
2474 Op->Barrier.Data = Str.data();
2475 Op->Barrier.Length = Str.size();
2476 Op->Barrier.HasnXSModifier = HasnXSModifier;
2477 Op->StartLoc = S;
2478 Op->EndLoc = S;
2479 return Op;
2480 }
2481
2482 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2483 uint32_t MRSReg,
2484 uint32_t MSRReg,
2485 uint32_t PStateField,
2486 MCContext &Ctx) {
2487 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2488 Op->SysReg.Data = Str.data();
2489 Op->SysReg.Length = Str.size();
2490 Op->SysReg.MRSReg = MRSReg;
2491 Op->SysReg.MSRReg = MSRReg;
2492 Op->SysReg.PStateField = PStateField;
2493 Op->StartLoc = S;
2494 Op->EndLoc = S;
2495 return Op;
2496 }
2497
2498 static std::unique_ptr<AArch64Operand>
2499 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2500 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2501 Op->PHint.Val = Val;
2502 Op->PHint.Data = Str.data();
2503 Op->PHint.Length = Str.size();
2504 Op->StartLoc = S;
2505 Op->EndLoc = S;
2506 return Op;
2507 }
2508
2509 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2510 SMLoc E, MCContext &Ctx) {
2511 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2512 Op->SysCRImm.Val = Val;
2513 Op->StartLoc = S;
2514 Op->EndLoc = E;
2515 return Op;
2516 }
2517
2518 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2519 StringRef Str,
2520 SMLoc S,
2521 MCContext &Ctx) {
2522 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2523 Op->Prefetch.Val = Val;
2524 Op->Barrier.Data = Str.data();
2525 Op->Barrier.Length = Str.size();
2526 Op->StartLoc = S;
2527 Op->EndLoc = S;
2528 return Op;
2529 }
2530
2531 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2532 StringRef Str,
2533 SMLoc S,
2534 MCContext &Ctx) {
2535 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2536 Op->PSBHint.Val = Val;
2537 Op->PSBHint.Data = Str.data();
2538 Op->PSBHint.Length = Str.size();
2539 Op->StartLoc = S;
2540 Op->EndLoc = S;
2541 return Op;
2542 }
2543
2544 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2545 StringRef Str,
2546 SMLoc S,
2547 MCContext &Ctx) {
2548 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2549 Op->BTIHint.Val = Val | 32;
2550 Op->BTIHint.Data = Str.data();
2551 Op->BTIHint.Length = Str.size();
2552 Op->StartLoc = S;
2553 Op->EndLoc = S;
2554 return Op;
2555 }
2556
2557 static std::unique_ptr<AArch64Operand>
2558 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2559 SMLoc S, SMLoc E, MCContext &Ctx) {
2560 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2561 Op->MatrixReg.RegNum = RegNum;
2562 Op->MatrixReg.ElementWidth = ElementWidth;
2563 Op->MatrixReg.Kind = Kind;
2564 Op->StartLoc = S;
2565 Op->EndLoc = E;
2566 return Op;
2567 }
2568
2569 static std::unique_ptr<AArch64Operand>
2570 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2571 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2572 Op->SVCR.PStateField = PStateField;
2573 Op->SVCR.Data = Str.data();
2574 Op->SVCR.Length = Str.size();
2575 Op->StartLoc = S;
2576 Op->EndLoc = S;
2577 return Op;
2578 }
2579
2580 static std::unique_ptr<AArch64Operand>
2581 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2582 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2583 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2584 Op->ShiftExtend.Type = ShOp;
2585 Op->ShiftExtend.Amount = Val;
2586 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2587 Op->StartLoc = S;
2588 Op->EndLoc = E;
2589 return Op;
2590 }
2591};
2592
2593} // end anonymous namespace.
2594
2595void AArch64Operand::print(raw_ostream &OS) const {
2596 switch (Kind) {
2597 case k_FPImm:
2598 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2599 if (!getFPImmIsExact())
2600 OS << " (inexact)";
2601 OS << ">";
2602 break;
2603 case k_Barrier: {
2604 StringRef Name = getBarrierName();
2605 if (!Name.empty())
2606 OS << "<barrier " << Name << ">";
2607 else
2608 OS << "<barrier invalid #" << getBarrier() << ">";
2609 break;
2610 }
2611 case k_Immediate:
2612 OS << *getImm();
2613 break;
2614 case k_ShiftedImm: {
2615 unsigned Shift = getShiftedImmShift();
2616 OS << "<shiftedimm ";
2617 OS << *getShiftedImmVal();
2618 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2619 break;
2620 }
2621 case k_ImmRange: {
2622 OS << "<immrange ";
2623 OS << getFirstImmVal();
2624 OS << ":" << getLastImmVal() << ">";
2625 break;
2626 }
2627 case k_CondCode:
2628 OS << "<condcode " << getCondCode() << ">";
2629 break;
2630 case k_VectorList: {
2631 OS << "<vectorlist ";
2632 unsigned Reg = getVectorListStart();
2633 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2634 OS << Reg + i * getVectorListStride() << " ";
2635 OS << ">";
2636 break;
2637 }
2638 case k_VectorIndex:
2639 OS << "<vectorindex " << getVectorIndex() << ">";
2640 break;
2641 case k_SysReg:
2642 OS << "<sysreg: " << getSysReg() << '>';
2643 break;
2644 case k_Token:
2645 OS << "'" << getToken() << "'";
2646 break;
2647 case k_SysCR:
2648 OS << "c" << getSysCR();
2649 break;
2650 case k_Prefetch: {
2651 StringRef Name = getPrefetchName();
2652 if (!Name.empty())
2653 OS << "<prfop " << Name << ">";
2654 else
2655 OS << "<prfop invalid #" << getPrefetch() << ">";
2656 break;
2657 }
2658 case k_PSBHint:
2659 OS << getPSBHintName();
2660 break;
2661 case k_PHint:
2662 OS << getPHintName();
2663 break;
2664 case k_BTIHint:
2665 OS << getBTIHintName();
2666 break;
2667 case k_MatrixRegister:
2668 OS << "<matrix " << getMatrixReg() << ">";
2669 break;
2670 case k_MatrixTileList: {
2671 OS << "<matrixlist ";
2672 unsigned RegMask = getMatrixTileListRegMask();
2673 unsigned MaxBits = 8;
2674 for (unsigned I = MaxBits; I > 0; --I)
2675 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2676 OS << '>';
2677 break;
2678 }
2679 case k_SVCR: {
2680 OS << getSVCR();
2681 break;
2682 }
2683 case k_Register:
2684 OS << "<register " << getReg() << ">";
2685 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2686 break;
2687 [[fallthrough]];
2688 case k_ShiftExtend:
2689 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2690 << getShiftExtendAmount();
2691 if (!hasShiftExtendAmount())
2692 OS << "<imp>";
2693 OS << '>';
2694 break;
2695 }
2696}
2697
2698/// @name Auto-generated Match Functions
2699/// {
2700
2702
2703/// }
2704
2705static unsigned MatchNeonVectorRegName(StringRef Name) {
2706 return StringSwitch<unsigned>(Name.lower())
2707 .Case("v0", AArch64::Q0)
2708 .Case("v1", AArch64::Q1)
2709 .Case("v2", AArch64::Q2)
2710 .Case("v3", AArch64::Q3)
2711 .Case("v4", AArch64::Q4)
2712 .Case("v5", AArch64::Q5)
2713 .Case("v6", AArch64::Q6)
2714 .Case("v7", AArch64::Q7)
2715 .Case("v8", AArch64::Q8)
2716 .Case("v9", AArch64::Q9)
2717 .Case("v10", AArch64::Q10)
2718 .Case("v11", AArch64::Q11)
2719 .Case("v12", AArch64::Q12)
2720 .Case("v13", AArch64::Q13)
2721 .Case("v14", AArch64::Q14)
2722 .Case("v15", AArch64::Q15)
2723 .Case("v16", AArch64::Q16)
2724 .Case("v17", AArch64::Q17)
2725 .Case("v18", AArch64::Q18)
2726 .Case("v19", AArch64::Q19)
2727 .Case("v20", AArch64::Q20)
2728 .Case("v21", AArch64::Q21)
2729 .Case("v22", AArch64::Q22)
2730 .Case("v23", AArch64::Q23)
2731 .Case("v24", AArch64::Q24)
2732 .Case("v25", AArch64::Q25)
2733 .Case("v26", AArch64::Q26)
2734 .Case("v27", AArch64::Q27)
2735 .Case("v28", AArch64::Q28)
2736 .Case("v29", AArch64::Q29)
2737 .Case("v30", AArch64::Q30)
2738 .Case("v31", AArch64::Q31)
2739 .Default(0);
2740}
2741
2742/// Returns an optional pair of (#elements, element-width) if Suffix
2743/// is a valid vector kind. Where the number of elements in a vector
2744/// or the vector width is implicit or explicitly unknown (but still a
2745/// valid suffix kind), 0 is used.
2746static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2747 RegKind VectorKind) {
2748 std::pair<int, int> Res = {-1, -1};
2749
2750 switch (VectorKind) {
2751 case RegKind::NeonVector:
2752 Res = StringSwitch<std::pair<int, int>>(Suffix)
2753 .Case("", {0, 0})
2754 .Case(".1d", {1, 64})
2755 .Case(".1q", {1, 128})
2756 // '.2h' needed for fp16 scalar pairwise reductions
2757 .Case(".2h", {2, 16})
2758 .Case(".2b", {2, 8})
2759 .Case(".2s", {2, 32})
2760 .Case(".2d", {2, 64})
2761 // '.4b' is another special case for the ARMv8.2a dot product
2762 // operand
2763 .Case(".4b", {4, 8})
2764 .Case(".4h", {4, 16})
2765 .Case(".4s", {4, 32})
2766 .Case(".8b", {8, 8})
2767 .Case(".8h", {8, 16})
2768 .Case(".16b", {16, 8})
2769 // Accept the width neutral ones, too, for verbose syntax. If
2770 // those aren't used in the right places, the token operand won't
2771 // match so all will work out.
2772 .Case(".b", {0, 8})
2773 .Case(".h", {0, 16})
2774 .Case(".s", {0, 32})
2775 .Case(".d", {0, 64})
2776 .Default({-1, -1});
2777 break;
2778 case RegKind::SVEPredicateAsCounter:
2779 case RegKind::SVEPredicateVector:
2780 case RegKind::SVEDataVector:
2781 case RegKind::Matrix:
2782 Res = StringSwitch<std::pair<int, int>>(Suffix)
2783 .Case("", {0, 0})
2784 .Case(".b", {0, 8})
2785 .Case(".h", {0, 16})
2786 .Case(".s", {0, 32})
2787 .Case(".d", {0, 64})
2788 .Case(".q", {0, 128})
2789 .Default({-1, -1});
2790 break;
2791 default:
2792 llvm_unreachable("Unsupported RegKind");
2793 }
2794
2795 if (Res == std::make_pair(-1, -1))
2796 return std::nullopt;
2797
2798 return std::optional<std::pair<int, int>>(Res);
2799}
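// Example (illustrative): for a NEON register, ".4s" yields {4, 32} and the
// width-only suffix ".s" yields {0, 32}; an empty suffix yields {0, 0} and an
// unrecognised suffix returns std::nullopt.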
2800
2801static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2802 return parseVectorKind(Suffix, VectorKind).has_value();
2803}
2804
2805static unsigned matchSVEDataVectorRegName(StringRef Name) {
2806 return StringSwitch<unsigned>(Name.lower())
2807 .Case("z0", AArch64::Z0)
2808 .Case("z1", AArch64::Z1)
2809 .Case("z2", AArch64::Z2)
2810 .Case("z3", AArch64::Z3)
2811 .Case("z4", AArch64::Z4)
2812 .Case("z5", AArch64::Z5)
2813 .Case("z6", AArch64::Z6)
2814 .Case("z7", AArch64::Z7)
2815 .Case("z8", AArch64::Z8)
2816 .Case("z9", AArch64::Z9)
2817 .Case("z10", AArch64::Z10)
2818 .Case("z11", AArch64::Z11)
2819 .Case("z12", AArch64::Z12)
2820 .Case("z13", AArch64::Z13)
2821 .Case("z14", AArch64::Z14)
2822 .Case("z15", AArch64::Z15)
2823 .Case("z16", AArch64::Z16)
2824 .Case("z17", AArch64::Z17)
2825 .Case("z18", AArch64::Z18)
2826 .Case("z19", AArch64::Z19)
2827 .Case("z20", AArch64::Z20)
2828 .Case("z21", AArch64::Z21)
2829 .Case("z22", AArch64::Z22)
2830 .Case("z23", AArch64::Z23)
2831 .Case("z24", AArch64::Z24)
2832 .Case("z25", AArch64::Z25)
2833 .Case("z26", AArch64::Z26)
2834 .Case("z27", AArch64::Z27)
2835 .Case("z28", AArch64::Z28)
2836 .Case("z29", AArch64::Z29)
2837 .Case("z30", AArch64::Z30)
2838 .Case("z31", AArch64::Z31)
2839 .Default(0);
2840}
2841
2842static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2843 return StringSwitch<unsigned>(Name.lower())
2844 .Case("p0", AArch64::P0)
2845 .Case("p1", AArch64::P1)
2846 .Case("p2", AArch64::P2)
2847 .Case("p3", AArch64::P3)
2848 .Case("p4", AArch64::P4)
2849 .Case("p5", AArch64::P5)
2850 .Case("p6", AArch64::P6)
2851 .Case("p7", AArch64::P7)
2852 .Case("p8", AArch64::P8)
2853 .Case("p9", AArch64::P9)
2854 .Case("p10", AArch64::P10)
2855 .Case("p11", AArch64::P11)
2856 .Case("p12", AArch64::P12)
2857 .Case("p13", AArch64::P13)
2858 .Case("p14", AArch64::P14)
2859 .Case("p15", AArch64::P15)
2860 .Default(0);
2861}
2862
2863static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2864 return StringSwitch<unsigned>(Name.lower())
2865 .Case("pn0", AArch64::PN0)
2866 .Case("pn1", AArch64::PN1)
2867 .Case("pn2", AArch64::PN2)
2868 .Case("pn3", AArch64::PN3)
2869 .Case("pn4", AArch64::PN4)
2870 .Case("pn5", AArch64::PN5)
2871 .Case("pn6", AArch64::PN6)
2872 .Case("pn7", AArch64::PN7)
2873 .Case("pn8", AArch64::PN8)
2874 .Case("pn9", AArch64::PN9)
2875 .Case("pn10", AArch64::PN10)
2876 .Case("pn11", AArch64::PN11)
2877 .Case("pn12", AArch64::PN12)
2878 .Case("pn13", AArch64::PN13)
2879 .Case("pn14", AArch64::PN14)
2880 .Case("pn15", AArch64::PN15)
2881 .Default(0);
2882}
2883
2884static unsigned matchMatrixTileListRegName(StringRef Name) {
2885 return StringSwitch<unsigned>(Name.lower())
2886 .Case("za0.d", AArch64::ZAD0)
2887 .Case("za1.d", AArch64::ZAD1)
2888 .Case("za2.d", AArch64::ZAD2)
2889 .Case("za3.d", AArch64::ZAD3)
2890 .Case("za4.d", AArch64::ZAD4)
2891 .Case("za5.d", AArch64::ZAD5)
2892 .Case("za6.d", AArch64::ZAD6)
2893 .Case("za7.d", AArch64::ZAD7)
2894 .Case("za0.s", AArch64::ZAS0)
2895 .Case("za1.s", AArch64::ZAS1)
2896 .Case("za2.s", AArch64::ZAS2)
2897 .Case("za3.s", AArch64::ZAS3)
2898 .Case("za0.h", AArch64::ZAH0)
2899 .Case("za1.h", AArch64::ZAH1)
2900 .Case("za0.b", AArch64::ZAB0)
2901 .Default(0);
2902}
2903
2904static unsigned matchMatrixRegName(StringRef Name) {
2905 return StringSwitch<unsigned>(Name.lower())
2906 .Case("za", AArch64::ZA)
2907 .Case("za0.q", AArch64::ZAQ0)
2908 .Case("za1.q", AArch64::ZAQ1)
2909 .Case("za2.q", AArch64::ZAQ2)
2910 .Case("za3.q", AArch64::ZAQ3)
2911 .Case("za4.q", AArch64::ZAQ4)
2912 .Case("za5.q", AArch64::ZAQ5)
2913 .Case("za6.q", AArch64::ZAQ6)
2914 .Case("za7.q", AArch64::ZAQ7)
2915 .Case("za8.q", AArch64::ZAQ8)
2916 .Case("za9.q", AArch64::ZAQ9)
2917 .Case("za10.q", AArch64::ZAQ10)
2918 .Case("za11.q", AArch64::ZAQ11)
2919 .Case("za12.q", AArch64::ZAQ12)
2920 .Case("za13.q", AArch64::ZAQ13)
2921 .Case("za14.q", AArch64::ZAQ14)
2922 .Case("za15.q", AArch64::ZAQ15)
2923 .Case("za0.d", AArch64::ZAD0)
2924 .Case("za1.d", AArch64::ZAD1)
2925 .Case("za2.d", AArch64::ZAD2)
2926 .Case("za3.d", AArch64::ZAD3)
2927 .Case("za4.d", AArch64::ZAD4)
2928 .Case("za5.d", AArch64::ZAD5)
2929 .Case("za6.d", AArch64::ZAD6)
2930 .Case("za7.d", AArch64::ZAD7)
2931 .Case("za0.s", AArch64::ZAS0)
2932 .Case("za1.s", AArch64::ZAS1)
2933 .Case("za2.s", AArch64::ZAS2)
2934 .Case("za3.s", AArch64::ZAS3)
2935 .Case("za0.h", AArch64::ZAH0)
2936 .Case("za1.h", AArch64::ZAH1)
2937 .Case("za0.b", AArch64::ZAB0)
2938 .Case("za0h.q", AArch64::ZAQ0)
2939 .Case("za1h.q", AArch64::ZAQ1)
2940 .Case("za2h.q", AArch64::ZAQ2)
2941 .Case("za3h.q", AArch64::ZAQ3)
2942 .Case("za4h.q", AArch64::ZAQ4)
2943 .Case("za5h.q", AArch64::ZAQ5)
2944 .Case("za6h.q", AArch64::ZAQ6)
2945 .Case("za7h.q", AArch64::ZAQ7)
2946 .Case("za8h.q", AArch64::ZAQ8)
2947 .Case("za9h.q", AArch64::ZAQ9)
2948 .Case("za10h.q", AArch64::ZAQ10)
2949 .Case("za11h.q", AArch64::ZAQ11)
2950 .Case("za12h.q", AArch64::ZAQ12)
2951 .Case("za13h.q", AArch64::ZAQ13)
2952 .Case("za14h.q", AArch64::ZAQ14)
2953 .Case("za15h.q", AArch64::ZAQ15)
2954 .Case("za0h.d", AArch64::ZAD0)
2955 .Case("za1h.d", AArch64::ZAD1)
2956 .Case("za2h.d", AArch64::ZAD2)
2957 .Case("za3h.d", AArch64::ZAD3)
2958 .Case("za4h.d", AArch64::ZAD4)
2959 .Case("za5h.d", AArch64::ZAD5)
2960 .Case("za6h.d", AArch64::ZAD6)
2961 .Case("za7h.d", AArch64::ZAD7)
2962 .Case("za0h.s", AArch64::ZAS0)
2963 .Case("za1h.s", AArch64::ZAS1)
2964 .Case("za2h.s", AArch64::ZAS2)
2965 .Case("za3h.s", AArch64::ZAS3)
2966 .Case("za0h.h", AArch64::ZAH0)
2967 .Case("za1h.h", AArch64::ZAH1)
2968 .Case("za0h.b", AArch64::ZAB0)
2969 .Case("za0v.q", AArch64::ZAQ0)
2970 .Case("za1v.q", AArch64::ZAQ1)
2971 .Case("za2v.q", AArch64::ZAQ2)
2972 .Case("za3v.q", AArch64::ZAQ3)
2973 .Case("za4v.q", AArch64::ZAQ4)
2974 .Case("za5v.q", AArch64::ZAQ5)
2975 .Case("za6v.q", AArch64::ZAQ6)
2976 .Case("za7v.q", AArch64::ZAQ7)
2977 .Case("za8v.q", AArch64::ZAQ8)
2978 .Case("za9v.q", AArch64::ZAQ9)
2979 .Case("za10v.q", AArch64::ZAQ10)
2980 .Case("za11v.q", AArch64::ZAQ11)
2981 .Case("za12v.q", AArch64::ZAQ12)
2982 .Case("za13v.q", AArch64::ZAQ13)
2983 .Case("za14v.q", AArch64::ZAQ14)
2984 .Case("za15v.q", AArch64::ZAQ15)
2985 .Case("za0v.d", AArch64::ZAD0)
2986 .Case("za1v.d", AArch64::ZAD1)
2987 .Case("za2v.d", AArch64::ZAD2)
2988 .Case("za3v.d", AArch64::ZAD3)
2989 .Case("za4v.d", AArch64::ZAD4)
2990 .Case("za5v.d", AArch64::ZAD5)
2991 .Case("za6v.d", AArch64::ZAD6)
2992 .Case("za7v.d", AArch64::ZAD7)
2993 .Case("za0v.s", AArch64::ZAS0)
2994 .Case("za1v.s", AArch64::ZAS1)
2995 .Case("za2v.s", AArch64::ZAS2)
2996 .Case("za3v.s", AArch64::ZAS3)
2997 .Case("za0v.h", AArch64::ZAH0)
2998 .Case("za1v.h", AArch64::ZAH1)
2999 .Case("za0v.b", AArch64::ZAB0)
3000 .Default(0);
3001}
3002
3003bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3004 SMLoc &EndLoc) {
3005 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3006}
3007
3008ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3009 SMLoc &EndLoc) {
3010 StartLoc = getLoc();
3011 ParseStatus Res = tryParseScalarRegister(Reg);
3012 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3013 return Res;
3014}
3015
3016// Matches a register name or register alias previously defined by '.req'
3017unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3018 RegKind Kind) {
3019 unsigned RegNum = 0;
3020 if ((RegNum = matchSVEDataVectorRegName(Name)))
3021 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3022
3023 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3024 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3025
3026 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3027 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3028
3029 if ((RegNum = MatchNeonVectorRegName(Name)))
3030 return Kind == RegKind::NeonVector ? RegNum : 0;
3031
3032 if ((RegNum = matchMatrixRegName(Name)))
3033 return Kind == RegKind::Matrix ? RegNum : 0;
3034
3035 if (Name.equals_insensitive("zt0"))
3036 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3037
3038 // The parsed register must be of RegKind Scalar
3039 if ((RegNum = MatchRegisterName(Name)))
3040 return (Kind == RegKind::Scalar) ? RegNum : 0;
3041
3042 if (!RegNum) {
3043 // Handle a few common aliases of registers.
3044 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3045 .Case("fp", AArch64::FP)
3046 .Case("lr", AArch64::LR)
3047 .Case("x31", AArch64::XZR)
3048 .Case("w31", AArch64::WZR)
3049 .Default(0))
3050 return Kind == RegKind::Scalar ? RegNum : 0;
3051
3052 // Check for aliases registered via .req. Canonicalize to lower case.
3053 // That's more consistent since register names are case insensitive, and
3054 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3055 auto Entry = RegisterReqs.find(Name.lower());
3056 if (Entry == RegisterReqs.end())
3057 return 0;
3058
3059 // set RegNum if the match is the right kind of register
3060 if (Kind == Entry->getValue().first)
3061 RegNum = Entry->getValue().second;
3062 }
3063 return RegNum;
3064}
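// Example (illustrative): after 'foo .req x0' registers a scalar alias,
// looking up "foo" with RegKind::Scalar returns X0; a lookup with any other
// RegKind returns 0, as does an unknown name.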
3065
3066unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3067 switch (K) {
3068 case RegKind::Scalar:
3069 case RegKind::NeonVector:
3070 case RegKind::SVEDataVector:
3071 return 32;
3072 case RegKind::Matrix:
3073 case RegKind::SVEPredicateVector:
3074 case RegKind::SVEPredicateAsCounter:
3075 return 16;
3076 case RegKind::LookupTable:
3077 return 1;
3078 }
3079 llvm_unreachable("Unsupported RegKind");
3080}
3081
3082/// tryParseScalarRegister - Try to parse a register name. The token must be an
3083/// Identifier when called, and if it is a register name the token is eaten and
3084/// the register is added to the operand list.
3085ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3086 const AsmToken &Tok = getTok();
3087 if (Tok.isNot(AsmToken::Identifier))
3088 return ParseStatus::NoMatch;
3089
3090 std::string lowerCase = Tok.getString().lower();
3091 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3092 if (Reg == 0)
3093 return ParseStatus::NoMatch;
3094
3095 RegNum = Reg;
3096 Lex(); // Eat identifier token.
3097 return ParseStatus::Success;
3098}
3099
3100/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3101ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3102 SMLoc S = getLoc();
3103
3104 if (getTok().isNot(AsmToken::Identifier))
3105 return Error(S, "Expected cN operand where 0 <= N <= 15");
3106
3107 StringRef Tok = getTok().getIdentifier();
3108 if (Tok[0] != 'c' && Tok[0] != 'C')
3109 return Error(S, "Expected cN operand where 0 <= N <= 15");
3110
3111 uint32_t CRNum;
3112 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3113 if (BadNum || CRNum > 15)
3114 return Error(S, "Expected cN operand where 0 <= N <= 15");
3115
3116 Lex(); // Eat identifier token.
3117 Operands.push_back(
3118 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3119 return ParseStatus::Success;
3120}
3121
3122// Either an identifier for named values or a 6-bit immediate.
3123ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3124 SMLoc S = getLoc();
3125 const AsmToken &Tok = getTok();
3126
3127 unsigned MaxVal = 63;
3128
3129 // Immediate case, with optional leading hash:
3130 if (parseOptionalToken(AsmToken::Hash) ||
3131 Tok.is(AsmToken::Integer)) {
3132 const MCExpr *ImmVal;
3133 if (getParser().parseExpression(ImmVal))
3134 return ParseStatus::Failure;
3135
3136 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3137 if (!MCE)
3138 return TokError("immediate value expected for prefetch operand");
3139 unsigned prfop = MCE->getValue();
3140 if (prfop > MaxVal)
3141 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3142 "] expected");
3143
3144 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3145 Operands.push_back(AArch64Operand::CreatePrefetch(
3146 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3147 return ParseStatus::Success;
3148 }
3149
3150 if (Tok.isNot(AsmToken::Identifier))
3151 return TokError("prefetch hint expected");
3152
3153 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3154 if (!RPRFM)
3155 return TokError("prefetch hint expected");
3156
3157 Operands.push_back(AArch64Operand::CreatePrefetch(
3158 RPRFM->Encoding, Tok.getString(), S, getContext()));
3159 Lex(); // Eat identifier token.
3160 return ParseStatus::Success;
3161}
3162
3163/// tryParsePrefetch - Try to parse a prefetch operand.
3164template <bool IsSVEPrefetch>
3165ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3166 SMLoc S = getLoc();
3167 const AsmToken &Tok = getTok();
3168
3169 auto LookupByName = [](StringRef N) {
3170 if (IsSVEPrefetch) {
3171 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3172 return std::optional<unsigned>(Res->Encoding);
3173 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3174 return std::optional<unsigned>(Res->Encoding);
3175 return std::optional<unsigned>();
3176 };
3177
3178 auto LookupByEncoding = [](unsigned E) {
3179 if (IsSVEPrefetch) {
3180 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3181 return std::optional<StringRef>(Res->Name);
3182 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3183 return std::optional<StringRef>(Res->Name);
3184 return std::optional<StringRef>();
3185 };
3186 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3187
3188 // Either an identifier for named values or a 5-bit immediate.
3189 // Eat optional hash.
3190 if (parseOptionalToken(AsmToken::Hash) ||
3191 Tok.is(AsmToken::Integer)) {
3192 const MCExpr *ImmVal;
3193 if (getParser().parseExpression(ImmVal))
3194 return ParseStatus::Failure;
3195
3196 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3197 if (!MCE)
3198 return TokError("immediate value expected for prefetch operand");
3199 unsigned prfop = MCE->getValue();
3200 if (prfop > MaxVal)
3201 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3202 "] expected");
3203
3204 auto PRFM = LookupByEncoding(MCE->getValue());
3205 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3206 S, getContext()));
3207 return ParseStatus::Success;
3208 }
3209
3210 if (Tok.isNot(AsmToken::Identifier))
3211 return TokError("prefetch hint expected");
3212
3213 auto PRFM = LookupByName(Tok.getString());
3214 if (!PRFM)
3215 return TokError("prefetch hint expected");
3216
3217 Operands.push_back(AArch64Operand::CreatePrefetch(
3218 *PRFM, Tok.getString(), S, getContext()));
3219 Lex(); // Eat identifier token.
3220 return ParseStatus::Success;
3221}
3222
3223/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3224ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3225 SMLoc S = getLoc();
3226 const AsmToken &Tok = getTok();
3227 if (Tok.isNot(AsmToken::Identifier))
3228 return TokError("invalid operand for instruction");
3229
3230 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3231 if (!PSB)
3232 return TokError("invalid operand for instruction");
3233
3234 Operands.push_back(AArch64Operand::CreatePSBHint(
3235 PSB->Encoding, Tok.getString(), S, getContext()));
3236 Lex(); // Eat identifier token.
3237 return ParseStatus::Success;
3238}
3239
3240ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3241 SMLoc StartLoc = getLoc();
3242
3243 MCRegister RegNum;
3244
3245 // The case where xzr, xzr is not present is handled by an InstAlias.
3246
3247 auto RegTok = getTok(); // in case we need to backtrack
3248 if (!tryParseScalarRegister(RegNum).isSuccess())
3249 return ParseStatus::NoMatch;
3250
3251 if (RegNum != AArch64::XZR) {
3252 getLexer().UnLex(RegTok);
3253 return ParseStatus::NoMatch;
3254 }
3255
3256 if (parseComma())
3257 return ParseStatus::Failure;
3258
3259 if (!tryParseScalarRegister(RegNum).isSuccess())
3260 return TokError("expected register operand");
3261
3262 if (RegNum != AArch64::XZR)
3263 return TokError("xzr must be followed by xzr");
3264
3265 // We need to push something, since we claim this is an operand in .td.
3266 // See also AArch64AsmParser::parseKeywordOperand.
3267 Operands.push_back(AArch64Operand::CreateReg(
3268 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3269
3270 return ParseStatus::Success;
3271}
3272
3273/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3274ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3275 SMLoc S = getLoc();
3276 const AsmToken &Tok = getTok();
3277 if (Tok.isNot(AsmToken::Identifier))
3278 return TokError("invalid operand for instruction");
3279
3280 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3281 if (!BTI)
3282 return TokError("invalid operand for instruction");
3283
3284 Operands.push_back(AArch64Operand::CreateBTIHint(
3285 BTI->Encoding, Tok.getString(), S, getContext()));
3286 Lex(); // Eat identifier token.
3287 return ParseStatus::Success;
3288}
3289
3290/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3291/// instruction.
3292ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3293 SMLoc S = getLoc();
3294 const MCExpr *Expr = nullptr;
3295
3296 if (getTok().is(AsmToken::Hash)) {
3297 Lex(); // Eat hash token.
3298 }
3299
3300 if (parseSymbolicImmVal(Expr))
3301 return ParseStatus::Failure;
3302
3303 AArch64MCExpr::VariantKind ELFRefKind;
3304 MCSymbolRefExpr::VariantKind DarwinRefKind;
3305 int64_t Addend;
3306 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3307 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3308 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3309 // No modifier was specified at all; this is the syntax for an ELF basic
3310 // ADRP relocation (unfortunately).
3311 Expr =
3312 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3313 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3314 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3315 Addend != 0) {
3316 return Error(S, "gotpage label reference not allowed an addend");
3317 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3318 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3319 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3320 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3321 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3322 ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE &&
3323 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3324 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3325 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE &&
3326 ELFRefKind != AArch64MCExpr::VK_TLSDESC_AUTH_PAGE) {
3327 // The operand must be an @page or @gotpage qualified symbolref.
3328 return Error(S, "page or gotpage label reference expected");
3329 }
3330 }
3331
3332 // We have either a label reference possibly with addend or an immediate. The
3333 // addend is a raw value here. The linker will adjust it to only reference the
3334 // page.
3335 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3336 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3337
3338 return ParseStatus::Success;
3339}
3340
3341/// tryParseAdrLabel - Parse and validate a source label for the ADR
3342/// instruction.
3343ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3344 SMLoc S = getLoc();
3345 const MCExpr *Expr = nullptr;
3346
3347 // Leave anything with a bracket to the default for SVE
3348 if (getTok().is(AsmToken::LBrac))
3349 return ParseStatus::NoMatch;
3350
3351 if (getTok().is(AsmToken::Hash))
3352 Lex(); // Eat hash token.
3353
3354 if (parseSymbolicImmVal(Expr))
3355 return ParseStatus::Failure;
3356
3357 AArch64MCExpr::VariantKind ELFRefKind;
3358 MCSymbolRefExpr::VariantKind DarwinRefKind;
3359 int64_t Addend;
3360 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3361 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3362 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3363 // No modifier was specified at all; this is the syntax for an ELF basic
3364 // ADR relocation (unfortunately).
3365 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3366 } else if (ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE) {
3367 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3368 // adr. It's not actually GOT entry page address but the GOT address
3369 // itself - we just share the same variant kind with :got_auth: operator
3370 // applied for adrp.
3371 // TODO: can we somehow get current TargetMachine object to call
3372 // getCodeModel() on it to ensure we are using tiny code model?
3373 return Error(S, "unexpected adr label");
3374 }
3375 }
3376
3377 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3378 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3379 return ParseStatus::Success;
3380}
3381
3382/// tryParseFPImm - A floating point immediate expression operand.
3383template <bool AddFPZeroAsLiteral>
3384ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3385 SMLoc S = getLoc();
3386
3387 bool Hash = parseOptionalToken(AsmToken::Hash);
3388
3389 // Handle negation, as that still comes through as a separate token.
3390 bool isNegative = parseOptionalToken(AsmToken::Minus);
3391
3392 const AsmToken &Tok = getTok();
3393 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3394 if (!Hash)
3395 return ParseStatus::NoMatch;
3396 return TokError("invalid floating point immediate");
3397 }
3398
3399 // Parse hexadecimal representation.
3400 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3401 if (Tok.getIntVal() > 255 || isNegative)
3402 return TokError("encoded floating point value out of range");
3403
3404 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3405 Operands.push_back(
3406 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3407 } else {
3408 // Parse FP representation.
3409 APFloat RealVal(APFloat::IEEEdouble());
3410 auto StatusOrErr =
3411 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3412 if (errorToBool(StatusOrErr.takeError()))
3413 return TokError("invalid floating point representation");
3414
3415 if (isNegative)
3416 RealVal.changeSign();
3417
3418 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3419 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3420 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3421 } else
3422 Operands.push_back(AArch64Operand::CreateFPImm(
3423 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3424 }
3425
3426 Lex(); // Eat the token.
3427
3428 return ParseStatus::Success;
3429}
3430
3431/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3432/// a shift suffix, for example '#1, lsl #12'.
3433ParseStatus
3434AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3435 SMLoc S = getLoc();
3436
3437 if (getTok().is(AsmToken::Hash))
3438 Lex(); // Eat '#'
3439 else if (getTok().isNot(AsmToken::Integer))
3440 // Operand should start from # or should be integer, emit error otherwise.
3441 return ParseStatus::NoMatch;
3442
3443 if (getTok().is(AsmToken::Integer) &&
3444 getLexer().peekTok().is(AsmToken::Colon))
3445 return tryParseImmRange(Operands);
3446
3447 const MCExpr *Imm = nullptr;
3448 if (parseSymbolicImmVal(Imm))
3449 return ParseStatus::Failure;
3450 else if (getTok().isNot(AsmToken::Comma)) {
3451 Operands.push_back(
3452 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3453 return ParseStatus::Success;
3454 }
3455
3456 // Eat ','
3457 Lex();
3458 StringRef VecGroup;
3459 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3460 Operands.push_back(
3461 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3462 Operands.push_back(
3463 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3464 return ParseStatus::Success;
3465 }
3466
3467 // The optional operand must be "lsl #N" where N is non-negative.
3468 if (!getTok().is(AsmToken::Identifier) ||
3469 !getTok().getIdentifier().equals_insensitive("lsl"))
3470 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3471
3472 // Eat 'lsl'
3473 Lex();
3474
3475 parseOptionalToken(AsmToken::Hash);
3476
3477 if (getTok().isNot(AsmToken::Integer))
3478 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3479
3480 int64_t ShiftAmount = getTok().getIntVal();
3481
3482 if (ShiftAmount < 0)
3483 return Error(getLoc(), "positive shift amount required");
3484 Lex(); // Eat the number
3485
3486 // Just in case the optional lsl #0 is used for immediates other than zero.
3487 if (ShiftAmount == 0 && Imm != nullptr) {
3488 Operands.push_back(
3489 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3490 return ParseStatus::Success;
3491 }
3492
3493 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3494 getLoc(), getContext()));
3495 return ParseStatus::Success;
3496}
3497
3498/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3499/// suggestion to help common typos.
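/// e.g. "hs" and "cs" both map to AArch64CC::HS; with SVE enabled, aliases such
/// as "any" (NE) and "nfrst" (PL) are also accepted, and "nfirst" triggers the
/// "nfrst" suggestion.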
3500AArch64CC::CondCode
3501AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3502 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3503 .Case("eq", AArch64CC::EQ)
3504 .Case("ne", AArch64CC::NE)
3505 .Case("cs", AArch64CC::HS)
3506 .Case("hs", AArch64CC::HS)
3507 .Case("cc", AArch64CC::LO)
3508 .Case("lo", AArch64CC::LO)
3509 .Case("mi", AArch64CC::MI)
3510 .Case("pl", AArch64CC::PL)
3511 .Case("vs", AArch64CC::VS)
3512 .Case("vc", AArch64CC::VC)
3513 .Case("hi", AArch64CC::HI)
3514 .Case("ls", AArch64CC::LS)
3515 .Case("ge", AArch64CC::GE)
3516 .Case("lt", AArch64CC::LT)
3517 .Case("gt", AArch64CC::GT)
3518 .Case("le", AArch64CC::LE)
3519 .Case("al", AArch64CC::AL)
3520 .Case("nv", AArch64CC::NV)
3521 .Default(AArch64CC::Invalid);
3522
3523 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3525 .Case("none", AArch64CC::EQ)
3526 .Case("any", AArch64CC::NE)
3527 .Case("nlast", AArch64CC::HS)
3528 .Case("last", AArch64CC::LO)
3529 .Case("first", AArch64CC::MI)
3530 .Case("nfrst", AArch64CC::PL)
3531 .Case("pmore", AArch64CC::HI)
3532 .Case("plast", AArch64CC::LS)
3533 .Case("tcont", AArch64CC::GE)
3534 .Case("tstop", AArch64CC::LT)
3535 .Default(AArch64CC::Invalid);
3536
3537 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3538 Suggestion = "nfrst";
3539 }
3540 return CC;
3541}
3542
3543/// parseCondCode - Parse a Condition Code operand.
3544bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3545 bool invertCondCode) {
3546 SMLoc S = getLoc();
3547 const AsmToken &Tok = getTok();
3548 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3549
3550 StringRef Cond = Tok.getString();
3551 std::string Suggestion;
3552 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3553 if (CC == AArch64CC::Invalid) {
3554 std::string Msg = "invalid condition code";
3555 if (!Suggestion.empty())
3556 Msg += ", did you mean " + Suggestion + "?";
3557 return TokError(Msg);
3558 }
3559 Lex(); // Eat identifier token.
3560
3561 if (invertCondCode) {
3562 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3563 return TokError("condition codes AL and NV are invalid for this instruction");
3564 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3565 }
3566
3567 Operands.push_back(
3568 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3569 return false;
3570}
3571
3572ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3573 const AsmToken &Tok = getTok();
3574 SMLoc S = getLoc();
3575
3576 if (Tok.isNot(AsmToken::Identifier))
3577 return TokError("invalid operand for instruction");
3578
3579 unsigned PStateImm = -1;
3580 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3581 if (!SVCR)
3582 return ParseStatus::NoMatch;
3583 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3584 PStateImm = SVCR->Encoding;
3585
3586 Operands.push_back(
3587 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3588 Lex(); // Eat identifier token.
3589 return ParseStatus::Success;
3590}
3591
3592ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3593 const AsmToken &Tok = getTok();
3594 SMLoc S = getLoc();
3595
3596 StringRef Name = Tok.getString();
3597
3598 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3599 Lex(); // eat "za[.(b|h|s|d)]"
3600 unsigned ElementWidth = 0;
3601 auto DotPosition = Name.find('.');
3602 if (DotPosition != StringRef::npos) {
3603 const auto &KindRes =
3604 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3605 if (!KindRes)
3606 return TokError(
3607 "Expected the register to be followed by element width suffix");
3608 ElementWidth = KindRes->second;
3609 }
3610 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3611 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3612 getContext()));
3613 if (getLexer().is(AsmToken::LBrac)) {
3614 // There's no comma after matrix operand, so we can parse the next operand
3615 // immediately.
3616 if (parseOperand(Operands, false, false))
3617 return ParseStatus::NoMatch;
3618 }
3619 return ParseStatus::Success;
3620 }
3621
3622 // Try to parse matrix register.
3623 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3624 if (!Reg)
3625 return ParseStatus::NoMatch;
3626
3627 size_t DotPosition = Name.find('.');
3628 assert(DotPosition != StringRef::npos && "Unexpected register");
3629
3630 StringRef Head = Name.take_front(DotPosition);
3631 StringRef Tail = Name.drop_front(DotPosition);
3632 StringRef RowOrColumn = Head.take_back();
3633
3634 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3635 .Case("h", MatrixKind::Row)
3636 .Case("v", MatrixKind::Col)
3637 .Default(MatrixKind::Tile);
3638
3639 // Next up, parsing the suffix
3640 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3641 if (!KindRes)
3642 return TokError(
3643 "Expected the register to be followed by element width suffix");
3644 unsigned ElementWidth = KindRes->second;
3645
3646 Lex();
3647
3648 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3649 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3650
3651 if (getLexer().is(AsmToken::LBrac)) {
3652 // There's no comma after matrix operand, so we can parse the next operand
3653 // immediately.
3654 if (parseOperand(Operands, false, false))
3655 return ParseStatus::NoMatch;
3656 }
3657 return ParseStatus::Success;
3658}
3659
3660/// tryParseOptionalShiftExtend - Some operands take an optional shift or extend argument. Parse
3661/// them if present.
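/// Illustrative forms: 'lsl #3', 'asr #2', or an extend such as 'uxtb' or
/// 'sxtw #1' (extends may omit the immediate, in which case #0 is implied).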
3662ParseStatus
3663AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3664 const AsmToken &Tok = getTok();
3665 std::string LowerID = Tok.getString().lower();
3668 .Case("lsl", AArch64_AM::LSL)
3669 .Case("lsr", AArch64_AM::LSR)
3670 .Case("asr", AArch64_AM::ASR)
3671 .Case("ror", AArch64_AM::ROR)
3672 .Case("msl", AArch64_AM::MSL)
3673 .Case("uxtb", AArch64_AM::UXTB)
3674 .Case("uxth", AArch64_AM::UXTH)
3675 .Case("uxtw", AArch64_AM::UXTW)
3676 .Case("uxtx", AArch64_AM::UXTX)
3677 .Case("sxtb", AArch64_AM::SXTB)
3678 .Case("sxth", AArch64_AM::SXTH)
3679 .Case("sxtw", AArch64_AM::SXTW)
3680 .Case("sxtx", AArch64_AM::SXTX)
3681 .Default(AArch64_AM::InvalidShiftExtend);
3682
3683 if (ShOp == AArch64_AM::InvalidShiftExtend)
3684 return ParseStatus::NoMatch;
3685
3686 SMLoc S = Tok.getLoc();
3687 Lex();
3688
3689 bool Hash = parseOptionalToken(AsmToken::Hash);
3690
3691 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3692 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3693 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3694 ShOp == AArch64_AM::MSL) {
3695 // We expect a number here.
3696 return TokError("expected #imm after shift specifier");
3697 }
3698
3699 // "extend" type operations don't need an immediate, #0 is implicit.
3700 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3701 Operands.push_back(
3702 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3703 return ParseStatus::Success;
3704 }
3705
3706 // Make sure we do actually have a number, identifier or a parenthesized
3707 // expression.
3708 SMLoc E = getLoc();
3709 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3710 !getTok().is(AsmToken::Identifier))
3711 return Error(E, "expected integer shift amount");
3712
3713 const MCExpr *ImmVal;
3714 if (getParser().parseExpression(ImmVal))
3715 return ParseStatus::Failure;
3716
3717 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3718 if (!MCE)
3719 return Error(E, "expected constant '#imm' after shift specifier");
3720
3721 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3722 Operands.push_back(AArch64Operand::CreateShiftExtend(
3723 ShOp, MCE->getValue(), true, S, E, getContext()));
3724 return ParseStatus::Success;
3725}
3726
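// Extension names accepted in directives such as ".arch_extension sve2" or
// ".cpu", mapped to the subtarget features they enable (illustrative note).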
3727static const struct Extension {
3728 const char *Name;
3729 const FeatureBitset Features;
3730} ExtensionMap[] = {
3731 {"crc", {AArch64::FeatureCRC}},
3732 {"sm4", {AArch64::FeatureSM4}},
3733 {"sha3", {AArch64::FeatureSHA3}},
3734 {"sha2", {AArch64::FeatureSHA2}},
3735 {"aes", {AArch64::FeatureAES}},
3736 {"crypto", {AArch64::FeatureCrypto}},
3737 {"fp", {AArch64::FeatureFPARMv8}},
3738 {"simd", {AArch64::FeatureNEON}},
3739 {"ras", {AArch64::FeatureRAS}},
3740 {"rasv2", {AArch64::FeatureRASv2}},
3741 {"lse", {AArch64::FeatureLSE}},
3742 {"predres", {AArch64::FeaturePredRes}},
3743 {"predres2", {AArch64::FeatureSPECRES2}},
3744 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3745 {"mte", {AArch64::FeatureMTE}},
3746 {"memtag", {AArch64::FeatureMTE}},
3747 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3748 {"pan", {AArch64::FeaturePAN}},
3749 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3750 {"ccpp", {AArch64::FeatureCCPP}},
3751 {"rcpc", {AArch64::FeatureRCPC}},
3752 {"rng", {AArch64::FeatureRandGen}},
3753 {"sve", {AArch64::FeatureSVE}},
3754 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3755 {"sve2", {AArch64::FeatureSVE2}},
3756 {"sve-aes", {AArch64::FeatureSVEAES}},
3757 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3758 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3759 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3760 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3761 {"sve2-bitperm",
3762 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3763 AArch64::FeatureSVE2}},
3764 {"sve2p1", {AArch64::FeatureSVE2p1}},
3765 {"ls64", {AArch64::FeatureLS64}},
3766 {"xs", {AArch64::FeatureXS}},
3767 {"pauth", {AArch64::FeaturePAuth}},
3768 {"flagm", {AArch64::FeatureFlagM}},
3769 {"rme", {AArch64::FeatureRME}},
3770 {"sme", {AArch64::FeatureSME}},
3771 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3772 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3773 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3774 {"sme2", {AArch64::FeatureSME2}},
3775 {"sme2p1", {AArch64::FeatureSME2p1}},
3776 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3777 {"hbc", {AArch64::FeatureHBC}},
3778 {"mops", {AArch64::FeatureMOPS}},
3779 {"mec", {AArch64::FeatureMEC}},
3780 {"the", {AArch64::FeatureTHE}},
3781 {"d128", {AArch64::FeatureD128}},
3782 {"lse128", {AArch64::FeatureLSE128}},
3783 {"ite", {AArch64::FeatureITE}},
3784 {"cssc", {AArch64::FeatureCSSC}},
3785 {"rcpc3", {AArch64::FeatureRCPC3}},
3786 {"gcs", {AArch64::FeatureGCS}},
3787 {"bf16", {AArch64::FeatureBF16}},
3788 {"compnum", {AArch64::FeatureComplxNum}},
3789 {"dotprod", {AArch64::FeatureDotProd}},
3790 {"f32mm", {AArch64::FeatureMatMulFP32}},
3791 {"f64mm", {AArch64::FeatureMatMulFP64}},
3792 {"fp16", {AArch64::FeatureFullFP16}},
3793 {"fp16fml", {AArch64::FeatureFP16FML}},
3794 {"i8mm", {AArch64::FeatureMatMulInt8}},
3795 {"lor", {AArch64::FeatureLOR}},
3796 {"profile", {AArch64::FeatureSPE}},
3797 // "rdma" is the name documented by binutils for the feature, but
3798 // binutils also accepts incomplete prefixes of features, so "rdm"
3799 // works too. Support both spellings here.
3800 {"rdm", {AArch64::FeatureRDM}},
3801 {"rdma", {AArch64::FeatureRDM}},
3802 {"sb", {AArch64::FeatureSB}},
3803 {"ssbs", {AArch64::FeatureSSBS}},
3804 {"tme", {AArch64::FeatureTME}},
3805 {"fp8", {AArch64::FeatureFP8}},
3806 {"faminmax", {AArch64::FeatureFAMINMAX}},
3807 {"fp8fma", {AArch64::FeatureFP8FMA}},
3808 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3809 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3810 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3811 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3812 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3813 {"lut", {AArch64::FeatureLUT}},
3814 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3815 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3816 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3817 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3818 {"cpa", {AArch64::FeatureCPA}},
3819 {"tlbiw", {AArch64::FeatureTLBIW}},
3820 {"pops", {AArch64::FeaturePoPS}},
3821 {"cmpbr", {AArch64::FeatureCMPBR}},
3822 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3823 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3824 {"fprcvt", {AArch64::FeatureFPRCVT}},
3825 {"lsfe", {AArch64::FeatureLSFE}},
3826 {"sme2p2", {AArch64::FeatureSME2p2}},
3827 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3828 {"sve2p2", {AArch64::FeatureSVE2p2}},
3829 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3830 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3831 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3832 {"lsui", {AArch64::FeatureLSUI}},
3833 {"occmo", {AArch64::FeatureOCCMO}},
3834 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3835 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3836 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3837 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3838};
3839
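// Appends a human-readable name for the architecture version or extensions
// implied by FBS to Str; used to build the "requires: ..." diagnostics below.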
3840static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3841 if (FBS[AArch64::HasV8_0aOps])
3842 Str += "ARMv8a";
3843 if (FBS[AArch64::HasV8_1aOps])
3844 Str += "ARMv8.1a";
3845 else if (FBS[AArch64::HasV8_2aOps])
3846 Str += "ARMv8.2a";
3847 else if (FBS[AArch64::HasV8_3aOps])
3848 Str += "ARMv8.3a";
3849 else if (FBS[AArch64::HasV8_4aOps])
3850 Str += "ARMv8.4a";
3851 else if (FBS[AArch64::HasV8_5aOps])
3852 Str += "ARMv8.5a";
3853 else if (FBS[AArch64::HasV8_6aOps])
3854 Str += "ARMv8.6a";
3855 else if (FBS[AArch64::HasV8_7aOps])
3856 Str += "ARMv8.7a";
3857 else if (FBS[AArch64::HasV8_8aOps])
3858 Str += "ARMv8.8a";
3859 else if (FBS[AArch64::HasV8_9aOps])
3860 Str += "ARMv8.9a";
3861 else if (FBS[AArch64::HasV9_0aOps])
3862 Str += "ARMv9-a";
3863 else if (FBS[AArch64::HasV9_1aOps])
3864 Str += "ARMv9.1a";
3865 else if (FBS[AArch64::HasV9_2aOps])
3866 Str += "ARMv9.2a";
3867 else if (FBS[AArch64::HasV9_3aOps])
3868 Str += "ARMv9.3a";
3869 else if (FBS[AArch64::HasV9_4aOps])
3870 Str += "ARMv9.4a";
3871 else if (FBS[AArch64::HasV9_5aOps])
3872 Str += "ARMv9.5a";
3873 else if (FBS[AArch64::HasV9_6aOps])
3874 Str += "ARMv9.6a";
3875 else if (FBS[AArch64::HasV8_0rOps])
3876 Str += "ARMv8r";
3877 else {
3878 SmallVector<std::string, 2> ExtMatches;
3879 for (const auto& Ext : ExtensionMap) {
3880 // Use & in case multiple features are enabled
3881 if ((FBS & Ext.Features) != FeatureBitset())
3882 ExtMatches.push_back(Ext.Name);
3883 }
3884 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3885 }
3886}
3887
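// The Encoding packs op1:CRn:CRm:op2 as 3+4+4+3 bits; unpack it into the
// immediate and CR operands of the generic SYS form. Illustrative example:
// 'cfp rctx, x0' becomes 'sys #3, c7, c3, #4, x0'.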
3888void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3889 SMLoc S) {
3890 const uint16_t Op2 = Encoding & 7;
3891 const uint16_t Cm = (Encoding & 0x78) >> 3;
3892 const uint16_t Cn = (Encoding & 0x780) >> 7;
3893 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3894
3895 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3896
3897 Operands.push_back(
3898 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3899 Operands.push_back(
3900 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3901 Operands.push_back(
3902 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3903 Expr = MCConstantExpr::create(Op2, getContext());
3904 Operands.push_back(
3905 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3906}
3907
3908/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3909/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
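/// e.g. 'ic ialluis' or 'tlbi vae1, x0' are rewritten to the equivalent 'sys'
/// form; ops whose names contain "all" take no register, the rest require one.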
3910bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3911 OperandVector &Operands) {
3912 if (Name.contains('.'))
3913 return TokError("invalid operand");
3914
3915 Mnemonic = Name;
3916 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3917
3918 const AsmToken &Tok = getTok();
3919 StringRef Op = Tok.getString();
3920 SMLoc S = Tok.getLoc();
3921
3922 if (Mnemonic == "ic") {
3923 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3924 if (!IC)
3925 return TokError("invalid operand for IC instruction");
3926 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3927 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3928 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3929 return TokError(Str);
3930 }
3931 createSysAlias(IC->Encoding, Operands, S);
3932 } else if (Mnemonic == "dc") {
3933 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3934 if (!DC)
3935 return TokError("invalid operand for DC instruction");
3936 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3937 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3938 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3939 return TokError(Str);
3940 }
3941 createSysAlias(DC->Encoding, Operands, S);
3942 } else if (Mnemonic == "at") {
3943 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3944 if (!AT)
3945 return TokError("invalid operand for AT instruction");
3946 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3947 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3948 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3949 return TokError(Str);
3950 }
3951 createSysAlias(AT->Encoding, Operands, S);
3952 } else if (Mnemonic == "tlbi") {
3953 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3954 if (!TLBI)
3955 return TokError("invalid operand for TLBI instruction");
3956 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3957 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3958 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3959 return TokError(Str);
3960 }
3961 createSysAlias(TLBI->Encoding, Operands, S);
3962 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3963
3964 if (Op.lower() != "rctx")
3965 return TokError("invalid operand for prediction restriction instruction");
3966
3967 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3968 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3969 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3970
3971 if (Mnemonic == "cosp" && !hasSpecres2)
3972 return TokError("COSP requires: predres2");
3973 if (!hasPredres)
3974 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3975
3976 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3977 : Mnemonic == "dvp" ? 0b101
3978 : Mnemonic == "cosp" ? 0b110
3979 : Mnemonic == "cpp" ? 0b111
3980 : 0;
3981 assert(PRCTX_Op2 &&
3982 "Invalid mnemonic for prediction restriction instruction");
3983 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3984 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3985
3986 createSysAlias(Encoding, Operands, S);
3987 }
3988
3989 Lex(); // Eat operand.
3990
3991 bool ExpectRegister = !Op.contains_insensitive("all");
3992 bool HasRegister = false;
3993
3994 // Check for the optional register operand.
3995 if (parseOptionalToken(AsmToken::Comma)) {
3996 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3997 return TokError("expected register operand");
3998 HasRegister = true;
3999 }
4000
4001 if (ExpectRegister && !HasRegister)
4002 return TokError("specified " + Mnemonic + " op requires a register");
4003 else if (!ExpectRegister && HasRegister)
4004 return TokError("specified " + Mnemonic + " op does not use a register");
4005
4006 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4007 return true;
4008
4009 return false;
4010}
4011
4012/// parseSyspAlias - The TLBIP instructions are simple aliases for
4013/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
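/// Illustrative form: 'tlbip vae1, x0, x1' (optionally with an 'nXS' qualifier
/// on the op name) becomes a SYSP taking an even/odd register pair or the xzr pair.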
4014bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4015 OperandVector &Operands) {
4016 if (Name.contains('.'))
4017 return TokError("invalid operand");
4018
4019 Mnemonic = Name;
4020 Operands.push_back(
4021 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4022
4023 const AsmToken &Tok = getTok();
4024 StringRef Op = Tok.getString();
4025 SMLoc S = Tok.getLoc();
4026
4027 if (Mnemonic == "tlbip") {
4028 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4029 if (HasnXSQualifier) {
4030 Op = Op.drop_back(3);
4031 }
4032 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
4033 if (!TLBIorig)
4034 return TokError("invalid operand for TLBIP instruction");
4035 const AArch64TLBI::TLBI TLBI(
4036 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4037 TLBIorig->NeedsReg,
4038 HasnXSQualifier
4039 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4040 : TLBIorig->FeaturesRequired);
4041 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
4042 std::string Name =
4043 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
4044 std::string Str("TLBIP " + Name + " requires: ");
4045 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
4046 return TokError(Str);
4047 }
4048 createSysAlias(TLBI.Encoding, Operands, S);
4049 }
4050
4051 Lex(); // Eat operand.
4052
4053 if (parseComma())
4054 return true;
4055
4056 if (Tok.isNot(AsmToken::Identifier))
4057 return TokError("expected register identifier");
4058 auto Result = tryParseSyspXzrPair(Operands);
4059 if (Result.isNoMatch())
4060 Result = tryParseGPRSeqPair(Operands);
4061 if (!Result.isSuccess())
4062 return TokError("specified " + Mnemonic +
4063 " op requires a pair of registers");
4064
4065 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4066 return true;
4067
4068 return false;
4069}
4070
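// Accepts either a raw immediate in [0, 15] (e.g. 'dmb #11') or a named option
// such as 'ish'; 'isb' only allows 'sy' and 'tsb' only allows 'csync'.
// Out-of-range 'dsb' immediates fall through to the nXS variant below.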
4071ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4072 MCAsmParser &Parser = getParser();
4073 const AsmToken &Tok = getTok();
4074
4075 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4076 return TokError("'csync' operand expected");
4077 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4078 // Immediate operand.
4079 const MCExpr *ImmVal;
4080 SMLoc ExprLoc = getLoc();
4081 AsmToken IntTok = Tok;
4082 if (getParser().parseExpression(ImmVal))
4083 return ParseStatus::Failure;
4084 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4085 if (!MCE)
4086 return Error(ExprLoc, "immediate value expected for barrier operand");
4087 int64_t Value = MCE->getValue();
4088 if (Mnemonic == "dsb" && Value > 15) {
4089 // This case is a no match here, but it might be matched by the nXS
4090 // variant. Deliberately do not unlex the optional '#', as it is not
4091 // necessary to characterize an integer immediate.
4092 Parser.getLexer().UnLex(IntTok);
4093 return ParseStatus::NoMatch;
4094 }
4095 if (Value < 0 || Value > 15)
4096 return Error(ExprLoc, "barrier operand out of range");
4097 auto DB = AArch64DB::lookupDBByEncoding(Value);
4098 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4099 ExprLoc, getContext(),
4100 false /*hasnXSModifier*/));
4101 return ParseStatus::Success;
4102 }
4103
4104 if (Tok.isNot(AsmToken::Identifier))
4105 return TokError("invalid operand for instruction");
4106
4107 StringRef Operand = Tok.getString();
4108 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4109 auto DB = AArch64DB::lookupDBByName(Operand);
4110 // The only valid named option for ISB is 'sy'
4111 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4112 return TokError("'sy' or #imm operand expected");
4113 // The only valid named option for TSB is 'csync'
4114 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4115 return TokError("'csync' operand expected");
4116 if (!DB && !TSB) {
4117 if (Mnemonic == "dsb") {
4118 // This case is a no match here, but it might be matched by the nXS
4119 // variant.
4120 return ParseStatus::NoMatch;
4121 }
4122 return TokError("invalid barrier option name");
4123 }
4124
4125 Operands.push_back(AArch64Operand::CreateBarrier(
4126 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4127 getContext(), false /*hasnXSModifier*/));
4128 Lex(); // Consume the option
4129
4130 return ParseStatus::Success;
4131}
4132
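// The v8.7-A nXS form of DSB: accepts only the immediates 16/20/24/28
// (e.g. 'dsb #16') or the corresponding named nXS options.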
4133ParseStatus
4134AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4135 const AsmToken &Tok = getTok();
4136
4137 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4138 if (Mnemonic != "dsb")
4139 return ParseStatus::Failure;
4140
4141 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4142 // Immediate operand.
4143 const MCExpr *ImmVal;
4144 SMLoc ExprLoc = getLoc();
4145 if (getParser().parseExpression(ImmVal))
4146 return ParseStatus::Failure;
4147 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4148 if (!MCE)
4149 return Error(ExprLoc, "immediate value expected for barrier operand");
4150 int64_t Value = MCE->getValue();
4151 // v8.7-A DSB in the nXS variant accepts only the following immediate
4152 // values: 16, 20, 24, 28.
4153 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4154 return Error(ExprLoc, "barrier operand out of range");
4155 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4156 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4157 ExprLoc, getContext(),
4158 true /*hasnXSModifier*/));
4159 return ParseStatus::Success;
4160 }
4161
4162 if (Tok.isNot(AsmToken::Identifier))
4163 return TokError("invalid operand for instruction");
4164
4165 StringRef Operand = Tok.getString();
4166 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4167
4168 if (!DB)
4169 return TokError("invalid barrier option name");
4170
4171 Operands.push_back(
4172 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4173 getContext(), true /*hasnXSModifier*/));
4174 Lex(); // Consume the option
4175
4176 return ParseStatus::Success;
4177}
4178
4179ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4180 const AsmToken &Tok = getTok();
4181
4182 if (Tok.isNot(AsmToken::Identifier))
4183 return ParseStatus::NoMatch;
4184
4185 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4186 return ParseStatus::NoMatch;
4187
4188 int MRSReg, MSRReg;
4189 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4190 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4191 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4192 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4193 } else
4194 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4195
4196 unsigned PStateImm = -1;
4197 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4198 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4199 PStateImm = PState15->Encoding;
4200 if (!PState15) {
4201 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4202 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4203 PStateImm = PState1->Encoding;
4204 }
4205
4206 Operands.push_back(
4207 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4208 PStateImm, getContext()));
4209 Lex(); // Eat identifier
4210
4211 return ParseStatus::Success;
4212}
4213
4214ParseStatus
4215AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4216 SMLoc S = getLoc();
4217 const AsmToken &Tok = getTok();
4218 if (Tok.isNot(AsmToken::Identifier))
4219 return TokError("invalid operand for instruction");
4220
4221 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4222 if (!PH)
4223 return TokError("invalid operand for instruction");
4224
4225 Operands.push_back(AArch64Operand::CreatePHintInst(
4226 PH->Encoding, Tok.getString(), S, getContext()));
4227 Lex(); // Eat identifier token.
4228 return ParseStatus::Success;
4229}
4230
4231/// tryParseNeonVectorRegister - Parse a vector register operand.
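/// e.g. 'v0.8b' or 'v2.4s', optionally followed by a lane index such as 'v3.s[1]'.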
4232bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4233 if (getTok().isNot(AsmToken::Identifier))
4234 return true;
4235
4236 SMLoc S = getLoc();
4237 // Check for a vector register specifier first.
4238 StringRef Kind;
4239 MCRegister Reg;
4240 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4241 if (!Res.isSuccess())
4242 return true;
4243
4244 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4245 if (!KindRes)
4246 return true;
4247
4248 unsigned ElementWidth = KindRes->second;
4249 Operands.push_back(
4250 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4251 S, getLoc(), getContext()));
4252
4253 // If there was an explicit qualifier, that goes on as a literal text
4254 // operand.
4255 if (!Kind.empty())
4256 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4257
4258 return tryParseVectorIndex(Operands).isFailure();
4259}
4260
4261ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4262 SMLoc SIdx = getLoc();
4263 if (parseOptionalToken(AsmToken::LBrac)) {
4264 const MCExpr *ImmVal;
4265 if (getParser().parseExpression(ImmVal))
4266 return ParseStatus::NoMatch;
4267 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4268 if (!MCE)
4269 return TokError("immediate value expected for vector index");
4270
4271 SMLoc E = getLoc();
4272
4273 if (parseToken(AsmToken::RBrac, "']' expected"))
4274 return ParseStatus::Failure;
4275
4276 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4277 E, getContext()));
4278 return ParseStatus::Success;
4279 }
4280
4281 return ParseStatus::NoMatch;
4282}
4283
4284// tryParseVectorRegister - Try to parse a vector register name with
4285// optional kind specifier. If it is a register specifier, eat the token
4286// and return it.
4287ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4288 StringRef &Kind,
4289 RegKind MatchKind) {
4290 const AsmToken &Tok = getTok();
4291
4292 if (Tok.isNot(AsmToken::Identifier))
4293 return ParseStatus::NoMatch;
4294
4295 StringRef Name = Tok.getString();
4296 // If there is a kind specifier, it's separated from the register name by
4297 // a '.'.
4298 size_t Start = 0, Next = Name.find('.');
4299 StringRef Head = Name.slice(Start, Next);
4300 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4301
4302 if (RegNum) {
4303 if (Next != StringRef::npos) {
4304 Kind = Name.substr(Next);
4305 if (!isValidVectorKind(Kind, MatchKind))
4306 return TokError("invalid vector kind qualifier");
4307 }
4308 Lex(); // Eat the register token.
4309
4310 Reg = RegNum;
4311 return ParseStatus::Success;
4312 }
4313
4314 return ParseStatus::NoMatch;
4315}
4316
4317ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4318 OperandVector &Operands) {
4319 ParseStatus Status =
4320 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4321 if (!Status.isSuccess())
4322 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4323 return Status;
4324}
4325
4326/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
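/// Illustrative forms: 'p0.b', 'p3/m', 'p7/z' (a '/m' or '/z' qualifier must not
/// be combined with a size suffix); the predicate-as-counter variant parses
/// registers such as 'pn8.h'.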
4327template <RegKind RK>
4328ParseStatus
4329AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4330 // Check for a SVE predicate register specifier first.
4331 const SMLoc S = getLoc();
4332 StringRef Kind;
4333 MCRegister RegNum;
4334 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4335 if (!Res.isSuccess())
4336 return Res;
4337
4338 const auto &KindRes = parseVectorKind(Kind, RK);
4339 if (!KindRes)
4340 return ParseStatus::NoMatch;
4341
4342 unsigned ElementWidth = KindRes->second;
4343 Operands.push_back(AArch64Operand::CreateVectorReg(
4344 RegNum, RK, ElementWidth, S,
4345 getLoc(), getContext()));
4346
4347 if (getLexer().is(AsmToken::LBrac)) {
4348 if (RK == RegKind::SVEPredicateAsCounter) {
4349 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4350 if (ResIndex.isSuccess())
4351 return ParseStatus::Success;
4352 } else {
4353 // Indexed predicate, there's no comma so try to parse the next operand
4354 // immediately.
4355 if (parseOperand(Operands, false, false))
4356 return ParseStatus::NoMatch;
4357 }
4358 }
4359
4360 // Not all predicates are followed by a '/m' or '/z'.
4361 if (getTok().isNot(AsmToken::Slash))
4362 return ParseStatus::Success;
4363
4364 // But when they do they shouldn't have an element type suffix.
4365 if (!Kind.empty())
4366 return Error(S, "not expecting size suffix");
4367
4368 // Add a literal slash as operand
4369 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4370
4371 Lex(); // Eat the slash.
4372
4373 // Zeroing or merging?
4374 auto Pred = getTok().getString().lower();
4375 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4376 return Error(getLoc(), "expecting 'z' predication");
4377
4378 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4379 return Error(getLoc(), "expecting 'm' or 'z' predication");
4380
4381 // Add zero/merge token.
4382 const char *ZM = Pred == "z" ? "z" : "m";
4383 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4384
4385 Lex(); // Eat zero/merge token.
4386 return ParseStatus::Success;
4387}
4388
4389/// parseRegister - Parse a register operand.
4390bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4391 // Try for a Neon vector register.
4392 if (!tryParseNeonVectorRegister(Operands))
4393 return false;
4394
4395 if (tryParseZTOperand(Operands).isSuccess())
4396 return false;
4397
4398 // Otherwise try for a scalar register.
4399 if (tryParseGPROperand<false>(Operands).isSuccess())
4400 return false;
4401
4402 return true;
4403}
4404
4405bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4406 bool HasELFModifier = false;
4407 AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
4408
4409 if (parseOptionalToken(AsmToken::Colon)) {
4410 HasELFModifier = true;
4411
4412 if (getTok().isNot(AsmToken::Identifier))
4413 return TokError("expect relocation specifier in operand after ':'");
4414
4415 std::string LowerCase = getTok().getIdentifier().lower();
4416 RefKind =
4419 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4420 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4421 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4422 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4423 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4424 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4425 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4426 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4427 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4428 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4429 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4430 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4431 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4432 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4433 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4434 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4435 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4436 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4437 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4438 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4439 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4440 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4441 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4442 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4443 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4444 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4445 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4446 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4447 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4448 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4449 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4450 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4451 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4452 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4453 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4454 .Case("tlsdesc_auth_lo12", AArch64MCExpr::VK_TLSDESC_AUTH_LO12)
4456 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4457 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4459 .Case("got_auth_lo12", AArch64MCExpr::VK_GOT_AUTH_LO12)
4461 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4462 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4463 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4466 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4467 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4468 .Default(AArch64MCExpr::VK_INVALID);
4469
4470 if (RefKind == AArch64MCExpr::VK_INVALID)
4471 return TokError("expect relocation specifier in operand after ':'");
4472
4473 Lex(); // Eat identifier
4474
4475 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4476 return true;
4477 }
4478
4479 if (getParser().parseExpression(ImmVal))
4480 return true;
4481
4482 if (HasELFModifier)
4483 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4484
4485 return false;
4486}
4487
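// Parses SME tile lists (illustrative): '{}', '{za}' (all tiles), or
// '{za0.s, za1.s}'; duplicate or out-of-order tiles only produce warnings.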
4488ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4489 if (getTok().isNot(AsmToken::LCurly))
4490 return ParseStatus::NoMatch;
4491
4492 auto ParseMatrixTile = [this](unsigned &Reg,
4493 unsigned &ElementWidth) -> ParseStatus {
4494 StringRef Name = getTok().getString();
4495 size_t DotPosition = Name.find('.');
4496 if (DotPosition == StringRef::npos)
4497 return ParseStatus::NoMatch;
4498
4499 unsigned RegNum = matchMatrixTileListRegName(Name);
4500 if (!RegNum)
4501 return ParseStatus::NoMatch;
4502
4503 StringRef Tail = Name.drop_front(DotPosition);
4504 const std::optional<std::pair<int, int>> &KindRes =
4505 parseVectorKind(Tail, RegKind::Matrix);
4506 if (!KindRes)
4507 return TokError(
4508 "Expected the register to be followed by element width suffix");
4509 ElementWidth = KindRes->second;
4510 Reg = RegNum;
4511 Lex(); // Eat the register.
4512 return ParseStatus::Success;
4513 };
4514
4515 SMLoc S = getLoc();
4516 auto LCurly = getTok();
4517 Lex(); // Eat left bracket token.
4518
4519 // Empty matrix list
4520 if (parseOptionalToken(AsmToken::RCurly)) {
4521 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4522 /*RegMask=*/0, S, getLoc(), getContext()));
4523 return ParseStatus::Success;
4524 }
4525
4526 // Try parse {za} alias early
4527 if (getTok().getString().equals_insensitive("za")) {
4528 Lex(); // Eat 'za'
4529
4530 if (parseToken(AsmToken::RCurly, "'}' expected"))
4531 return ParseStatus::Failure;
4532
4533 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4534 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4535 return ParseStatus::Success;
4536 }
4537
4538 SMLoc TileLoc = getLoc();
4539
4540 unsigned FirstReg, ElementWidth;
4541 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4542 if (!ParseRes.isSuccess()) {
4543 getLexer().UnLex(LCurly);
4544 return ParseRes;
4545 }
4546
4547 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4548
4549 unsigned PrevReg = FirstReg;
4550
4551 SmallSet<unsigned, 8> DRegs;
4552 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4553
4554 SmallSet<unsigned, 8> SeenRegs;
4555 SeenRegs.insert(FirstReg);
4556
4557 while (parseOptionalToken(AsmToken::Comma)) {
4558 TileLoc = getLoc();
4559 unsigned Reg, NextElementWidth;
4560 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4561 if (!ParseRes.isSuccess())
4562 return ParseRes;
4563
4564 // Element size must match on all regs in the list.
4565 if (ElementWidth != NextElementWidth)
4566 return Error(TileLoc, "mismatched register size suffix");
4567
4568 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4569 Warning(TileLoc, "tile list not in ascending order");
4570
4571 if (SeenRegs.contains(Reg))
4572 Warning(TileLoc, "duplicate tile in list");
4573 else {
4574 SeenRegs.insert(Reg);
4575 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4576 }
4577
4578 PrevReg = Reg;
4579 }
4580
4581 if (parseToken(AsmToken::RCurly, "'}' expected"))
4582 return ParseStatus::Failure;
4583
4584 unsigned RegMask = 0;
4585 for (auto Reg : DRegs)
4586 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4587 RI->getEncodingValue(AArch64::ZAD0));
4588 Operands.push_back(
4589 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4590
4591 return ParseStatus::Success;
4592}
4593
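// Handles vector lists (illustrative): NEON '{v0.8b, v1.8b}', SVE ranges such
// as '{z0.s - z3.s}', and strided SME2 lists such as '{z0.d, z4.d, z8.d, z12.d}';
// at most four registers are accepted.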
4594template <RegKind VectorKind>
4595ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4596 bool ExpectMatch) {
4597 MCAsmParser &Parser = getParser();
4598 if (!getTok().is(AsmToken::LCurly))
4599 return ParseStatus::NoMatch;
4600
4601 // Wrapper around parse function
4602 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4603 bool NoMatchIsError) -> ParseStatus {
4604 auto RegTok = getTok();
4605 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4606 if (ParseRes.isSuccess()) {
4607 if (parseVectorKind(Kind, VectorKind))
4608 return ParseRes;
4609 llvm_unreachable("Expected a valid vector kind");
4610 }
4611
4612 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4613 RegTok.getString().equals_insensitive("zt0"))
4614 return ParseStatus::NoMatch;
4615
4616 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4617 (ParseRes.isNoMatch() && NoMatchIsError &&
4618 !RegTok.getString().starts_with_insensitive("za")))
4619 return Error(Loc, "vector register expected");
4620
4621 return ParseStatus::NoMatch;
4622 };
4623
4624 int NumRegs = getNumRegsForRegKind(VectorKind);
4625 SMLoc S = getLoc();
4626 auto LCurly = getTok();
4627 Lex(); // Eat left bracket token.
4628
4629 StringRef Kind;
4630 MCRegister FirstReg;
4631 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4632
4633 // Put back the original left bracket if there was no match, so that
4634 // different types of list-operands can be matched (e.g. SVE, Neon).
4635 if (ParseRes.isNoMatch())
4636 Parser.getLexer().UnLex(LCurly);
4637
4638 if (!ParseRes.isSuccess())
4639 return ParseRes;
4640
4641 int64_t PrevReg = FirstReg;
4642 unsigned Count = 1;
4643
4644 int Stride = 1;
4645 if (parseOptionalToken(AsmToken::Minus)) {
4646 SMLoc Loc = getLoc();
4647 StringRef NextKind;
4648
4649 MCRegister Reg;
4650 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4651 if (!ParseRes.isSuccess())
4652 return ParseRes;
4653
4654 // Any kind suffixes must match on all regs in the list.
4655 if (Kind != NextKind)
4656 return Error(Loc, "mismatched register size suffix");
4657
4658 unsigned Space =
4659 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4660
4661 if (Space == 0 || Space > 3)
4662 return Error(Loc, "invalid number of vectors");
4663
4664 Count += Space;
4665 }
4666 else {
4667 bool HasCalculatedStride = false;
4668 while (parseOptionalToken(AsmToken::Comma)) {
4669 SMLoc Loc = getLoc();
4670 StringRef NextKind;
4671 MCRegister Reg;
4672 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4673 if (!ParseRes.isSuccess())
4674 return ParseRes;
4675
4676 // Any kind suffixes must match on all regs in the list.
4677 if (Kind != NextKind)
4678 return Error(Loc, "mismatched register size suffix");
4679
4680 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4681 unsigned PrevRegVal =
4682 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4683 if (!HasCalculatedStride) {
4684 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4685 : (RegVal + NumRegs - PrevRegVal);
4686 HasCalculatedStride = true;
4687 }
4688
4689 // Registers must be incremental (with a wraparound at the last register).
4690 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4691 return Error(Loc, "registers must have the same sequential stride");
4692
4693 PrevReg = Reg;
4694 ++Count;
4695 }
4696 }
4697
4698 if (parseToken(AsmToken::RCurly, "'}' expected"))
4699 return ParseStatus::Failure;
4700
4701 if (Count > 4)
4702 return Error(S, "invalid number of vectors");
4703
4704 unsigned NumElements = 0;
4705 unsigned ElementWidth = 0;
4706 if (!Kind.empty()) {
4707 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4708 std::tie(NumElements, ElementWidth) = *VK;
4709 }
4710
4711 Operands.push_back(AArch64Operand::CreateVectorList(
4712 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4713 getLoc(), getContext()));
4714
4715 return ParseStatus::Success;
4716}
4717
4718/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4719bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4720 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4721 if (!ParseRes.isSuccess())
4722 return true;
4723
4724 return tryParseVectorIndex(Operands).isFailure();
4725}
4726
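// Parses a scalar register optionally followed by ', #0' (illustrative: 'x3'
// or 'sp, #0'); any non-zero index is rejected.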
4727ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4728 SMLoc StartLoc = getLoc();
4729
4730 MCRegister RegNum;
4731 ParseStatus Res = tryParseScalarRegister(RegNum);
4732 if (!Res.isSuccess())
4733 return Res;
4734
4735 if (!parseOptionalToken(AsmToken::Comma)) {
4736 Operands.push_back(AArch64Operand::CreateReg(
4737 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4738 return ParseStatus::Success;
4739 }
4740
4741 parseOptionalToken(AsmToken::Hash);
4742
4743 if (getTok().isNot(AsmToken::Integer))
4744 return Error(getLoc(), "index must be absent or #0");
4745
4746 const MCExpr *ImmVal;
4747 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4748 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4749 return Error(getLoc(), "index must be absent or #0");
4750
4751 Operands.push_back(AArch64Operand::CreateReg(
4752 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4753 return ParseStatus::Success;
4754}
4755
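// Parses the SME2 lookup-table register (illustrative): 'zt0', optionally with
// an index such as 'zt0[0]' or 'zt0[0, mul vl]'.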
4756ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4757 SMLoc StartLoc = getLoc();
4758 const AsmToken &Tok = getTok();
4759 std::string Name = Tok.getString().lower();
4760
4761 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4762
4763 if (RegNum == 0)
4764 return ParseStatus::NoMatch;
4765
4766 Operands.push_back(AArch64Operand::CreateReg(
4767 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4768 Lex(); // Eat register.
4769
4770 // Check if register is followed by an index
4771 if (parseOptionalToken(AsmToken::LBrac)) {
4772 Operands.push_back(
4773 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4774 const MCExpr *ImmVal;
4775 if (getParser().parseExpression(ImmVal))
4776 return ParseStatus::NoMatch;
4777 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4778 if (!MCE)
4779 return TokError("immediate value expected for vector index");
4780 Operands.push_back(AArch64Operand::CreateImm(
4781 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4782 getLoc(), getContext()));
4783 if (parseOptionalToken(AsmToken::Comma))
4784 if (parseOptionalMulOperand(Operands))
4785 return ParseStatus::Failure;
4786 if (parseToken(AsmToken::RBrac, "']' expected"))
4787 return ParseStatus::Failure;
4788 Operands.push_back(
4789 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4790 }
4791 return ParseStatus::Success;
4792}
4793
4794template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4795ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4796 SMLoc StartLoc = getLoc();
4797
4798 MCRegister RegNum;
4799 ParseStatus Res = tryParseScalarRegister(RegNum);
4800 if (!Res.isSuccess())
4801 return Res;
4802
4803 // No shift/extend is the default.
4804 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4805 Operands.push_back(AArch64Operand::CreateReg(
4806 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4807 return ParseStatus::Success;
4808 }
4809
4810 // Eat the comma
4811 Lex();
4812
4813 // Match the shift
4815 Res = tryParseOptionalShiftExtend(ExtOpnd);
4816 if (!Res.isSuccess())
4817 return Res;
4818
4819 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4820 Operands.push_back(AArch64Operand::CreateReg(
4821 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4822 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4823 Ext->hasShiftExtendAmount()));
4824
4825 return ParseStatus::Success;
4826}
4827
4828bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4829 MCAsmParser &Parser = getParser();
4830
4831 // Some SVE instructions have a decoration after the immediate, i.e.
4832 // "mul vl". We parse them here and add tokens, which must be present in the
4833 // asm string in the tablegen instruction.
4834 bool NextIsVL =
4835 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4836 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4837 if (!getTok().getString().equals_insensitive("mul") ||
4838 !(NextIsVL || NextIsHash))
4839 return true;
4840
4841 Operands.push_back(
4842 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4843 Lex(); // Eat the "mul"
4844
4845 if (NextIsVL) {
4846 Operands.push_back(
4847 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4848 Lex(); // Eat the "vl"
4849 return false;
4850 }
4851
4852 if (NextIsHash) {
4853 Lex(); // Eat the #
4854 SMLoc S = getLoc();
4855
4856 // Parse immediate operand.
4857 const MCExpr *ImmVal;
4858 if (!Parser.parseExpression(ImmVal))
4859 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4860 Operands.push_back(AArch64Operand::CreateImm(
4861 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4862 getContext()));
4863 return false;
4864 }
4865 }
4866
4867 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4868}
4869
4870bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4871 StringRef &VecGroup) {
4872 MCAsmParser &Parser = getParser();
4873 auto Tok = Parser.getTok();
4874 if (Tok.isNot(AsmToken::Identifier))
4875 return true;
4876
4878 .Case("vgx2", "vgx2")
4879 .Case("vgx4", "vgx4")
4880 .Default("");
4881
4882 if (VG.empty())
4883 return true;
4884
4885 VecGroup = VG;
4886 Parser.Lex(); // Eat vgx[2|4]
4887 return false;
4888}
4889
4890bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4891 auto Tok = getTok();
4892 if (Tok.isNot(AsmToken::Identifier))
4893 return true;
4894
4895 auto Keyword = Tok.getString();
4897 .Case("sm", "sm")
4898 .Case("za", "za")
4899 .Default(Keyword);
4900 Operands.push_back(
4901 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4902
4903 Lex();
4904 return false;
4905}
4906
4907/// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4908/// operand regardless of the mnemonic.
4909bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4910 bool invertCondCode) {
4911 MCAsmParser &Parser = getParser();
4912
4913 ParseStatus ResTy =
4914 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4915
4916 // Check if the current operand has a custom associated parser, if so, try to
4917 // custom parse the operand, or fallback to the general approach.
4918 if (ResTy.isSuccess())
4919 return false;
4920 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4921 // there was a match, but an error occurred, in which case, just return that
4922 // the operand parsing failed.
4923 if (ResTy.isFailure())
4924 return true;
4925
4926 // Nothing custom, so do general case parsing.
4927 SMLoc S, E;
4928 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4929 if (parseOptionalToken(AsmToken::Comma)) {
4930 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4931 if (!Res.isNoMatch())
4932 return Res.isFailure();
4933 getLexer().UnLex(SavedTok);
4934 }
4935 return false;
4936 };
4937 switch (getLexer().getKind()) {
4938 default: {
4939 SMLoc S = getLoc();
4940 const MCExpr *Expr;
4941 if (parseSymbolicImmVal(Expr))
4942 return Error(S, "invalid operand");
4943
4944 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4945 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4946 return parseOptionalShiftExtend(getTok());
4947 }
4948 case AsmToken::LBrac: {
4949 Operands.push_back(
4950 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4951 Lex(); // Eat '['
4952
4953 // There's no comma after a '[', so we can parse the next operand
4954 // immediately.
4955 return parseOperand(Operands, false, false);
4956 }
4957 case AsmToken::LCurly: {
4958 if (!parseNeonVectorList(Operands))
4959 return false;
4960
4961 Operands.push_back(
4962 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4963 Lex(); // Eat '{'
4964
4965 // There's no comma after a '{', so we can parse the next operand
4966 // immediately.
4967 return parseOperand(Operands, false, false);
4968 }
4969 case AsmToken::Identifier: {
4970 // See if this is a "VG" decoration used by SME instructions.
4971 StringRef VecGroup;
4972 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4973 Operands.push_back(
4974 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4975 return false;
4976 }
4977 // If we're expecting a Condition Code operand, then just parse that.
4978 if (isCondCode)
4979 return parseCondCode(Operands, invertCondCode);
4980
4981 // If it's a register name, parse it.
4982 if (!parseRegister(Operands)) {
4983 // Parse an optional shift/extend modifier.
4984 AsmToken SavedTok = getTok();
4985 if (parseOptionalToken(AsmToken::Comma)) {
4986 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4987 // such cases and don't report an error when <label> happens to match a
4988 // shift/extend modifier.
4989 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4990 /*ParseForAllFeatures=*/true);
4991 if (!Res.isNoMatch())
4992 return Res.isFailure();
4993 Res = tryParseOptionalShiftExtend(Operands);
4994 if (!Res.isNoMatch())
4995 return Res.isFailure();
4996 getLexer().UnLex(SavedTok);
4997 }
4998 return false;
4999 }
5000
5001 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5002 // by SVE instructions.
5003 if (!parseOptionalMulOperand(Operands))
5004 return false;
5005
5006 // If this is a two-word mnemonic, parse its special keyword
5007 // operand as an identifier.
5008 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5009 Mnemonic == "gcsb")
5010 return parseKeywordOperand(Operands);
5011
5012 // This was not a register so parse other operands that start with an
5013 // identifier (like labels) as expressions and create them as immediates.
5014 const MCExpr *IdVal;
5015 S = getLoc();
5016 if (getParser().parseExpression(IdVal))
5017 return true;
5018 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5019 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5020 return false;
5021 }
5022 case AsmToken::Integer:
5023 case AsmToken::Real:
5024 case AsmToken::Hash: {
5025 // #42 -> immediate.
5026 S = getLoc();
5027
5028 parseOptionalToken(AsmToken::Hash);
5029
5030 // Parse a negative sign
5031 bool isNegative = false;
5032 if (getTok().is(AsmToken::Minus)) {
5033 isNegative = true;
5034 // We need to consume this token only when we have a Real, otherwise
5035 // we let parseSymbolicImmVal take care of it
5036 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5037 Lex();
5038 }
5039
5040 // The only Real that should come through here is a literal #0.0 for
5041 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5042 // so convert the value.
5043 const AsmToken &Tok = getTok();
5044 if (Tok.is(AsmToken::Real)) {
5045 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5046 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5047 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5048 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5049 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5050 return TokError("unexpected floating point literal");
5051 else if (IntVal != 0 || isNegative)
5052 return TokError("expected floating-point constant #0.0");
5053 Lex(); // Eat the token.
5054
5055 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5056 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5057 return false;
5058 }
5059
5060 const MCExpr *ImmVal;
5061 if (parseSymbolicImmVal(ImmVal))
5062 return true;
5063
5064 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5065 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5066
5067 // Parse an optional shift/extend modifier.
5068 return parseOptionalShiftExtend(Tok);
5069 }
5070 case AsmToken::Equal: {
5071 SMLoc Loc = getLoc();
5072 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5073 return TokError("unexpected token in operand");
5074 Lex(); // Eat '='
5075 const MCExpr *SubExprVal;
5076 if (getParser().parseExpression(SubExprVal))
5077 return true;
5078
5079 if (Operands.size() < 2 ||
5080 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5081 return Error(Loc, "Only valid when first operand is register");
5082
5083 bool IsXReg =
5084 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5085 Operands[1]->getReg());
5086
5087 MCContext& Ctx = getContext();
5088 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5089 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
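// e.g. "ldr x0, =0x12340000" becomes "movz x0, #0x1234, lsl #16"
// (illustrative).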
5090 if (isa<MCConstantExpr>(SubExprVal)) {
5091 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5092 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5093 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5094 ShiftAmt += 16;
5095 Imm >>= 16;
5096 }
5097 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5098 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5099 Operands.push_back(AArch64Operand::CreateImm(
5100 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5101 if (ShiftAmt)
5102 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5103 ShiftAmt, true, S, E, Ctx));
5104 return false;
5105 }
5106 APInt Simm = APInt(64, Imm << ShiftAmt);
5107 // check if the immediate is an unsigned or signed 32-bit int for W regs
5108 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5109 return Error(Loc, "Immediate too large for register");
5110 }
5111 // If it is a label or an imm that cannot fit in a movz, put it into CP.
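// e.g. "ldr x0, =sym" or "ldr x0, =0x123456789abc" becomes a literal-pool
// load of the value.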
5112 const MCExpr *CPLoc =
5113 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5114 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5115 return false;
5116 }
5117 }
5118}
5119
5120bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5121 const MCExpr *Expr = nullptr;
5122 SMLoc L = getLoc();
5123 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5124 return true;
5125 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5126 if (check(!Value, L, "expected constant expression"))
5127 return true;
5128 Out = Value->getValue();
5129 return false;
5130}
5131
5132bool AArch64AsmParser::parseComma() {
5133 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5134 return true;
5135 // Eat the comma
5136 Lex();
5137 return false;
5138}
5139
5140bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5141 unsigned First, unsigned Last) {
5142 MCRegister Reg;
5143 SMLoc Start, End;
5144 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5145 return true;
5146
5147 // Special handling for FP and LR; they aren't linearly after x28 in
5148 // the registers enum.
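// e.g. for a range of x0..lr, "fp" is accepted as index 29 and "lr" as
// index 30.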
5149 unsigned RangeEnd = Last;
5150 if (Base == AArch64::X0) {
5151 if (Last == AArch64::FP) {
5152 RangeEnd = AArch64::X28;
5153 if (Reg == AArch64::FP) {
5154 Out = 29;
5155 return false;
5156 }
5157 }
5158 if (Last == AArch64::LR) {
5159 RangeEnd = AArch64::X28;
5160 if (Reg == AArch64::FP) {
5161 Out = 29;
5162 return false;
5163 } else if (Reg == AArch64::LR) {
5164 Out = 30;
5165 return false;
5166 }
5167 }
5168 }
5169
5170 if (check(Reg < First || Reg > RangeEnd, Start,
5171 Twine("expected register in range ") +
5172 AArch64InstPrinter::getRegisterName(First) + " to " +
5173 AArch64InstPrinter::getRegisterName(Last)))
5174 return true;
5175 Out = Reg - Base;
5176 return false;
5177}
5178
5179bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5180 const MCParsedAsmOperand &Op2) const {
5181 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5182 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5183
5184 if (AOp1.isVectorList() && AOp2.isVectorList())
5185 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5186 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5187 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5188
5189 if (!AOp1.isReg() || !AOp2.isReg())
5190 return false;
5191
5192 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5193 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5194 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5195
5196 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5197 "Testing equality of non-scalar registers not supported");
5198
5199 // Check whether the registers match across their sub/super register classes.
5200 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5201 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5202 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5203 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5204 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5205 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5206 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5207 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5208
5209 return false;
5210}
5211
5212/// Parse an AArch64 instruction mnemonic followed by its operands.
5213bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5214 StringRef Name, SMLoc NameLoc,
5215 OperandVector &Operands) {
5216 Name = StringSwitch<StringRef>(Name)
5217 .Case("beq", "b.eq")
5218 .Case("bne", "b.ne")
5219 .Case("bhs", "b.hs")
5220 .Case("bcs", "b.cs")
5221 .Case("blo", "b.lo")
5222 .Case("bcc", "b.cc")
5223 .Case("bmi", "b.mi")
5224 .Case("bpl", "b.pl")
5225 .Case("bvs", "b.vs")
5226 .Case("bvc", "b.vc")
5227 .Case("bhi", "b.hi")
5228 .Case("bls", "b.ls")
5229 .Case("bge", "b.ge")
5230 .Case("blt", "b.lt")
5231 .Case("bgt", "b.gt")
5232 .Case("ble", "b.le")
5233 .Case("bal", "b.al")
5234 .Case("bnv", "b.nv")
5235 .Default(Name);
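// e.g. "beq lbl" is rewritten to "b.eq lbl" before any further parsing.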
5236
5237 // First check for the AArch64-specific .req directive.
5238 if (getTok().is(AsmToken::Identifier) &&
5239 getTok().getIdentifier().lower() == ".req") {
5240 parseDirectiveReq(Name, NameLoc);
5241 // We always return 'error' for this, as we're done with this
5242 // statement and don't need to match the instruction.
5243 return true;
5244 }
5245
5246 // Create the leading tokens for the mnemonic, split by '.' characters.
5247 size_t Start = 0, Next = Name.find('.');
5248 StringRef Head = Name.slice(Start, Next);
5249
5250 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5251 // the SYS instruction.
5252 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5253 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5254 return parseSysAlias(Head, NameLoc, Operands);
5255
5256 // TLBIP instructions are aliases for the SYSP instruction.
5257 if (Head == "tlbip")
5258 return parseSyspAlias(Head, NameLoc, Operands);
5259
5260 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5261 Mnemonic = Head;
5262
5263 // Handle condition codes for a branch mnemonic
5264 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5265 Start = Next;
5266 Next = Name.find('.', Start + 1);
5267 Head = Name.slice(Start + 1, Next);
5268
5269 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5270 (Head.data() - Name.data()));
5271 std::string Suggestion;
5272 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5273 if (CC == AArch64CC::Invalid) {
5274 std::string Msg = "invalid condition code";
5275 if (!Suggestion.empty())
5276 Msg += ", did you mean " + Suggestion + "?";
5277 return Error(SuffixLoc, Msg);
5278 }
5279 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5280 /*IsSuffix=*/true));
5281 Operands.push_back(
5282 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5283 }
5284
5285 // Add the remaining tokens in the mnemonic.
5286 while (Next != StringRef::npos) {
5287 Start = Next;
5288 Next = Name.find('.', Start + 1);
5289 Head = Name.slice(Start, Next);
5290 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5291 (Head.data() - Name.data()) + 1);
5292 Operands.push_back(AArch64Operand::CreateToken(
5293 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5294 }
5295
5296 // Conditional compare instructions have a Condition Code operand, which
5297 // needs to be parsed and converted into an immediate operand.
5298 bool condCodeFourthOperand =
5299 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5300 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5301 Head == "csinc" || Head == "csinv" || Head == "csneg");
5302
5303 // These instructions are aliases to some of the conditional select
5304 // instructions. However, the condition code is inverted in the aliased
5305 // instruction.
5306 //
5307 // FIXME: Is this the correct way to handle these? Or should the parser
5308 // generate the aliased instructions directly?
5309 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5310 bool condCodeThirdOperand =
5311 (Head == "cinc" || Head == "cinv" || Head == "cneg");
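// e.g. the condition code is the fourth operand in "ccmp w0, w1, #0, eq",
// the second in "cset w0, eq" and the third in "cinc w0, w1, eq".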
5312
5313 // Read the remaining operands.
5314 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5315
5316 unsigned N = 1;
5317 do {
5318 // Parse and remember the operand.
5319 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5320 (N == 3 && condCodeThirdOperand) ||
5321 (N == 2 && condCodeSecondOperand),
5322 condCodeSecondOperand || condCodeThirdOperand)) {
5323 return true;
5324 }
5325
5326 // After successfully parsing some operands there are three special cases
5327 // to consider (i.e. notional operands not separated by commas). Two are
5328 // due to memory specifiers:
5329 // + An RBrac will end an address for load/store/prefetch
5330 // + An '!' will indicate a pre-indexed operation.
5331 //
5332 // And a further case is '}', which ends a group of tokens specifying the
5333 // SME accumulator array 'ZA' or tile vector, i.e.
5334 //
5335 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5336 //
5337 // It's someone else's responsibility to make sure these tokens are sane
5338 // in the given context!
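// e.g. parsing "ldr x0, [x1, #8]!" consumes the "]" and "!" tokens here, and
// "zero { za }" consumes the closing "}".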
5339
5340 if (parseOptionalToken(AsmToken::RBrac))
5341 Operands.push_back(
5342 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5343 if (parseOptionalToken(AsmToken::Exclaim))
5344 Operands.push_back(
5345 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5346 if (parseOptionalToken(AsmToken::RCurly))
5347 Operands.push_back(
5348 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5349
5350 ++N;
5351 } while (parseOptionalToken(AsmToken::Comma));
5352 }
5353
5354 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5355 return true;
5356
5357 return false;
5358}
5359
5360static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5361 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5362 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5363 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5364 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5365 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5366 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5367 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5368}
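// e.g. isMatchingOrAlias(Z3, Q3) is true because q3 overlaps z3; this is
// used below to catch a movprfx destination reappearing as a source.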
5369
5370// FIXME: This entire function is a giant hack to provide us with decent
5371// operand range validation/diagnostics until TableGen/MC can be extended
5372// to support autogeneration of this kind of validation.
5373bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5374 SmallVectorImpl<SMLoc> &Loc) {
5375 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5376 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5377
5378 // A prefix only applies to the instruction following it. Here we extract
5379 // prefix information for the next instruction before validating the current
5380 // one so that in the case of failure we don't erroneously continue using the
5381 // current prefix.
5382 PrefixInfo Prefix = NextPrefix;
5383 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5384
5385 // Before validating the instruction in isolation we run through the rules
5386 // applicable when it follows a prefix instruction.
5387 // NOTE: brk & hlt can be prefixed but require no additional validation.
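// e.g. "movprfx z0, z1" followed by "add z2.s, p0/m, z2.s, z3.s" is rejected
// below because the two destinations differ.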
5388 if (Prefix.isActive() &&
5389 (Inst.getOpcode() != AArch64::BRK) &&
5390 (Inst.getOpcode() != AArch64::HLT)) {
5391
5392 // Prefixed instructions must have a destructive operand.
5393 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5394 AArch64::NotDestructive)
5395 return Error(IDLoc, "instruction is unpredictable when following a"
5396 " movprfx, suggest replacing movprfx with mov");
5397
5398 // Destination operands must match.
5399 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5400 return Error(Loc[0], "instruction is unpredictable when following a"
5401 " movprfx writing to a different destination");
5402
5403 // Destination operand must not be used in any other location.
5404 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5405 if (Inst.getOperand(i).isReg() &&
5406 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5407 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5408 return Error(Loc[0], "instruction is unpredictable when following a"
5409 " movprfx and destination also used as non-destructive"
5410 " source");
5411 }
5412
5413 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5414 if (Prefix.isPredicated()) {
5415 int PgIdx = -1;
5416
5417 // Find the instruction's general predicate.
5418 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5419 if (Inst.getOperand(i).isReg() &&
5420 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5421 PgIdx = i;
5422 break;
5423 }
5424
5425 // Instruction must be predicated if the movprfx is predicated.
5426 if (PgIdx == -1 ||
5427 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5428 return Error(IDLoc, "instruction is unpredictable when following a"
5429 " predicated movprfx, suggest using unpredicated movprfx");
5430
5431 // Instruction must use same general predicate as the movprfx.
5432 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5433 return Error(IDLoc, "instruction is unpredictable when following a"
5434 " predicated movprfx using a different general predicate");
5435
5436 // Instruction element type must match the movprfx.
5437 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5438 return Error(IDLoc, "instruction is unpredictable when following a"
5439 " predicated movprfx with a different element size");
5440 }
5441 }
5442
5443 // On ARM64EC, only valid registers may be used. Warn against using
5444 // explicitly disallowed registers.
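// e.g. "mov x13, #0" is warned about here when assembling for ARM64EC.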
5445 if (IsWindowsArm64EC) {
5446 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5447 if (Inst.getOperand(i).isReg()) {
5448 MCRegister Reg = Inst.getOperand(i).getReg();
5449 // At this point, vector registers are matched to their
5450 // appropriately sized alias.
5451 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5452 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5453 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5454 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5455 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5456 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5457 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5458 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5459 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5460 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5461 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5462 " is disallowed on ARM64EC.");
5463 }
5464 }
5465 }
5466 }
5467
5468 // Check for indexed addressing modes where the base register is the same
5469 // as a destination/source register, or for a pair load where Rt == Rt2.
5470 // All of those are unpredictable.
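// e.g. "ldp x0, x1, [x0], #16" (writeback base is also a destination) and
// "ldp x2, x2, [sp]" (Rt2 == Rt) are both diagnosed below.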
5471 switch (Inst.getOpcode()) {
5472 case AArch64::LDPSWpre:
5473 case AArch64::LDPWpost:
5474 case AArch64::LDPWpre:
5475 case AArch64::LDPXpost:
5476 case AArch64::LDPXpre: {
5477 MCRegister Rt = Inst.getOperand(1).getReg();
5478 MCRegister Rt2 = Inst.getOperand(2).getReg();
5479 MCRegister Rn = Inst.getOperand(3).getReg();
5480 if (RI->isSubRegisterEq(Rn, Rt))
5481 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5482 "is also a destination");
5483 if (RI->isSubRegisterEq(Rn, Rt2))
5484 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5485 "is also a destination");
5486 [[fallthrough]];
5487 }
5488 case AArch64::LDR_ZA:
5489 case AArch64::STR_ZA: {
5490 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5491 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5492 return Error(Loc[1],
5493 "unpredictable instruction, immediate and offset mismatch.");
5494 break;
5495 }
5496 case AArch64::LDPDi:
5497 case AArch64::LDPQi:
5498 case AArch64::LDPSi:
5499 case AArch64::LDPSWi:
5500 case AArch64::LDPWi:
5501 case AArch64::LDPXi: {
5502 MCRegister Rt = Inst.getOperand(0).getReg();
5503 MCRegister Rt2 = Inst.getOperand(1).getReg();
5504 if (Rt == Rt2)
5505 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5506 break;
5507 }
5508 case AArch64::LDPDpost:
5509 case AArch64::LDPDpre:
5510 case AArch64::LDPQpost:
5511 case AArch64::LDPQpre:
5512 case AArch64::LDPSpost:
5513 case AArch64::LDPSpre:
5514 case AArch64::LDPSWpost: {
5515 MCRegister Rt = Inst.getOperand(1).getReg();
5516 MCRegister Rt2 = Inst.getOperand(2).getReg();
5517 if (Rt == Rt2)
5518 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5519 break;
5520 }
5521 case AArch64::STPDpost:
5522 case AArch64::STPDpre:
5523 case AArch64::STPQpost:
5524 case AArch64::STPQpre:
5525 case AArch64::STPSpost:
5526 case AArch64::STPSpre:
5527 case AArch64::STPWpost:
5528 case AArch64::STPWpre:
5529 case AArch64::STPXpost:
5530 case AArch64::STPXpre: {
5531 MCRegister Rt = Inst.getOperand(1).getReg();
5532 MCRegister Rt2 = Inst.getOperand(2).getReg();
5533 MCRegister Rn = Inst.getOperand(3).getReg();
5534 if (RI->isSubRegisterEq(Rn, Rt))
5535 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5536 "is also a source");
5537 if (RI->isSubRegisterEq(Rn, Rt2))
5538 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5539 "is also a source");
5540 break;
5541 }
5542 case AArch64::LDRBBpre:
5543 case AArch64::LDRBpre:
5544 case AArch64::LDRHHpre:
5545 case AArch64::LDRHpre:
5546 case AArch64::LDRSBWpre:
5547 case AArch64::LDRSBXpre:
5548 case AArch64::LDRSHWpre:
5549 case AArch64::LDRSHXpre:
5550 case AArch64::LDRSWpre:
5551 case AArch64::LDRWpre:
5552 case AArch64::LDRXpre:
5553 case AArch64::LDRBBpost:
5554 case AArch64::LDRBpost:
5555 case AArch64::LDRHHpost:
5556 case AArch64::LDRHpost:
5557 case AArch64::LDRSBWpost:
5558 case AArch64::LDRSBXpost:
5559 case AArch64::LDRSHWpost:
5560 case AArch64::LDRSHXpost:
5561 case AArch64::LDRSWpost:
5562 case AArch64::LDRWpost:
5563 case AArch64::LDRXpost: {
5564 MCRegister Rt = Inst.getOperand(1).getReg();
5565 MCRegister Rn = Inst.getOperand(2).getReg();
5566 if (RI->isSubRegisterEq(Rn, Rt))
5567 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5568 "is also a source");
5569 break;
5570 }
5571 case AArch64::STRBBpost:
5572 case AArch64::STRBpost:
5573 case AArch64::STRHHpost:
5574 case AArch64::STRHpost:
5575 case AArch64::STRWpost:
5576 case AArch64::STRXpost:
5577 case AArch64::STRBBpre:
5578 case AArch64::STRBpre:
5579 case AArch64::STRHHpre:
5580 case AArch64::STRHpre:
5581 case AArch64::STRWpre:
5582 case AArch64::STRXpre: {
5583 MCRegister Rt = Inst.getOperand(1).getReg();
5584 MCRegister Rn = Inst.getOperand(2).getReg();
5585 if (RI->isSubRegisterEq(Rn, Rt))
5586 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5587 "is also a source");
5588 break;
5589 }
5590 case AArch64::STXRB:
5591 case AArch64::STXRH:
5592 case AArch64::STXRW:
5593 case AArch64::STXRX:
5594 case AArch64::STLXRB:
5595 case AArch64::STLXRH:
5596 case AArch64::STLXRW:
5597 case AArch64::STLXRX: {
5598 MCRegister Rs = Inst.getOperand(0).getReg();
5599 MCRegister Rt = Inst.getOperand(1).getReg();
5600 MCRegister Rn = Inst.getOperand(2).getReg();
5601 if (RI->isSubRegisterEq(Rt, Rs) ||
5602 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5603 return Error(Loc[0],
5604 "unpredictable STXR instruction, status is also a source");
5605 break;
5606 }
5607 case AArch64::STXPW:
5608 case AArch64::STXPX:
5609 case AArch64::STLXPW:
5610 case AArch64::STLXPX: {
5611 MCRegister Rs = Inst.getOperand(0).getReg();
5612 MCRegister Rt1 = Inst.getOperand(1).getReg();
5613 MCRegister Rt2 = Inst.getOperand(2).getReg();
5614 MCRegister Rn = Inst.getOperand(3).getReg();
5615 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5616 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5617 return Error(Loc[0],
5618 "unpredictable STXP instruction, status is also a source");
5619 break;
5620 }
5621 case AArch64::LDRABwriteback:
5622 case AArch64::LDRAAwriteback: {
5623 MCRegister Xt = Inst.getOperand(0).getReg();
5624 MCRegister Xn = Inst.getOperand(1).getReg();
5625 if (Xt == Xn)
5626 return Error(Loc[0],
5627 "unpredictable LDRA instruction, writeback base"
5628 " is also a destination");
5629 break;
5630 }
5631 }
5632
5633 // Check v8.8-A memops instructions.
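// e.g. "cpyfp [x0]!, [x1]!, x0!" is rejected below because the destination
// and size registers are the same.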
5634 switch (Inst.getOpcode()) {
5635 case AArch64::CPYFP:
5636 case AArch64::CPYFPWN:
5637 case AArch64::CPYFPRN:
5638 case AArch64::CPYFPN:
5639 case AArch64::CPYFPWT:
5640 case AArch64::CPYFPWTWN:
5641 case AArch64::CPYFPWTRN:
5642 case AArch64::CPYFPWTN:
5643 case AArch64::CPYFPRT:
5644 case AArch64::CPYFPRTWN:
5645 case AArch64::CPYFPRTRN:
5646 case AArch64::CPYFPRTN:
5647 case AArch64::CPYFPT:
5648 case AArch64::CPYFPTWN:
5649 case AArch64::CPYFPTRN:
5650 case AArch64::CPYFPTN:
5651 case AArch64::CPYFM:
5652 case AArch64::CPYFMWN:
5653 case AArch64::CPYFMRN:
5654 case AArch64::CPYFMN:
5655 case AArch64::CPYFMWT:
5656 case AArch64::CPYFMWTWN:
5657 case AArch64::CPYFMWTRN:
5658 case AArch64::CPYFMWTN:
5659 case AArch64::CPYFMRT:
5660 case AArch64::CPYFMRTWN:
5661 case AArch64::CPYFMRTRN:
5662 case AArch64::CPYFMRTN:
5663 case AArch64::CPYFMT:
5664 case AArch64::CPYFMTWN:
5665 case AArch64::CPYFMTRN:
5666 case AArch64::CPYFMTN:
5667 case AArch64::CPYFE:
5668 case AArch64::CPYFEWN:
5669 case AArch64::CPYFERN:
5670 case AArch64::CPYFEN:
5671 case AArch64::CPYFEWT:
5672 case AArch64::CPYFEWTWN:
5673 case AArch64::CPYFEWTRN:
5674 case AArch64::CPYFEWTN:
5675 case AArch64::CPYFERT:
5676 case AArch64::CPYFERTWN:
5677 case AArch64::CPYFERTRN:
5678 case AArch64::CPYFERTN:
5679 case AArch64::CPYFET:
5680 case AArch64::CPYFETWN:
5681 case AArch64::CPYFETRN:
5682 case AArch64::CPYFETN:
5683 case AArch64::CPYP:
5684 case AArch64::CPYPWN:
5685 case AArch64::CPYPRN:
5686 case AArch64::CPYPN:
5687 case AArch64::CPYPWT:
5688 case AArch64::CPYPWTWN:
5689 case AArch64::CPYPWTRN:
5690 case AArch64::CPYPWTN:
5691 case AArch64::CPYPRT:
5692 case AArch64::CPYPRTWN:
5693 case AArch64::CPYPRTRN:
5694 case AArch64::CPYPRTN:
5695 case AArch64::CPYPT:
5696 case AArch64::CPYPTWN:
5697 case AArch64::CPYPTRN:
5698 case AArch64::CPYPTN:
5699 case AArch64::CPYM:
5700 case AArch64::CPYMWN:
5701 case AArch64::CPYMRN:
5702 case AArch64::CPYMN:
5703 case AArch64::CPYMWT:
5704 case AArch64::CPYMWTWN:
5705 case AArch64::CPYMWTRN:
5706 case AArch64::CPYMWTN:
5707 case AArch64::CPYMRT:
5708 case AArch64::CPYMRTWN:
5709 case AArch64::CPYMRTRN:
5710 case AArch64::CPYMRTN:
5711 case AArch64::CPYMT:
5712 case AArch64::CPYMTWN:
5713 case AArch64::CPYMTRN:
5714 case AArch64::CPYMTN:
5715 case AArch64::CPYE:
5716 case AArch64::CPYEWN:
5717 case AArch64::CPYERN:
5718 case AArch64::CPYEN:
5719 case AArch64::CPYEWT:
5720 case AArch64::CPYEWTWN:
5721 case AArch64::CPYEWTRN:
5722 case AArch64::CPYEWTN:
5723 case AArch64::CPYERT:
5724 case AArch64::CPYERTWN:
5725 case AArch64::CPYERTRN:
5726 case AArch64::CPYERTN:
5727 case AArch64::CPYET:
5728 case AArch64::CPYETWN:
5729 case AArch64::CPYETRN:
5730 case AArch64::CPYETN: {
5731 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5732 MCRegister Xs_wb = Inst.getOperand(1).getReg();
5733 MCRegister Xn_wb = Inst.getOperand(2).getReg();
5734 MCRegister Xd = Inst.getOperand(3).getReg();
5735 MCRegister Xs = Inst.getOperand(4).getReg();
5736 MCRegister Xn = Inst.getOperand(5).getReg();
5737 if (Xd_wb != Xd)
5738 return Error(Loc[0],
5739 "invalid CPY instruction, Xd_wb and Xd do not match");
5740 if (Xs_wb != Xs)
5741 return Error(Loc[0],
5742 "invalid CPY instruction, Xs_wb and Xs do not match");
5743 if (Xn_wb != Xn)
5744 return Error(Loc[0],
5745 "invalid CPY instruction, Xn_wb and Xn do not match");
5746 if (Xd == Xs)
5747 return Error(Loc[0], "invalid CPY instruction, destination and source"
5748 " registers are the same");
5749 if (Xd == Xn)
5750 return Error(Loc[0], "invalid CPY instruction, destination and size"
5751 " registers are the same");
5752 if (Xs == Xn)
5753 return Error(Loc[0], "invalid CPY instruction, source and size"
5754 " registers are the same");
5755 break;
5756 }
5757 case AArch64::SETP:
5758 case AArch64::SETPT:
5759 case AArch64::SETPN:
5760 case AArch64::SETPTN:
5761 case AArch64::SETM:
5762 case AArch64::SETMT:
5763 case AArch64::SETMN:
5764 case AArch64::SETMTN:
5765 case AArch64::SETE:
5766 case AArch64::SETET:
5767 case AArch64::SETEN:
5768 case AArch64::SETETN:
5769 case AArch64::SETGP:
5770 case AArch64::SETGPT:
5771 case AArch64::SETGPN:
5772 case AArch64::SETGPTN:
5773 case AArch64::SETGM:
5774 case AArch64::SETGMT:
5775 case AArch64::SETGMN:
5776 case AArch64::SETGMTN:
5777 case AArch64::MOPSSETGE:
5778 case AArch64::MOPSSETGET:
5779 case AArch64::MOPSSETGEN:
5780 case AArch64::MOPSSETGETN: {
5781 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5782 MCRegister Xn_wb = Inst.getOperand(1).getReg();
5783 MCRegister Xd = Inst.getOperand(2).getReg();
5784 MCRegister Xn = Inst.getOperand(3).getReg();
5785 MCRegister Xm = Inst.getOperand(4).getReg();
5786 if (Xd_wb != Xd)
5787 return Error(Loc[0],
5788 "invalid SET instruction, Xd_wb and Xd do not match");
5789 if (Xn_wb != Xn)
5790 return Error(Loc[0],
5791 "invalid SET instruction, Xn_wb and Xn do not match");
5792 if (Xd == Xn)
5793 return Error(Loc[0], "invalid SET instruction, destination and size"
5794 " registers are the same");
5795 if (Xd == Xm)
5796 return Error(Loc[0], "invalid SET instruction, destination and source"
5797 " registers are the same");
5798 if (Xn == Xm)
5799 return Error(Loc[0], "invalid SET instruction, source and size"
5800 " registers are the same");
5801 break;
5802 }
5803 }
5804
5805 // Now check immediate ranges. Separate from the above as there is overlap
5806 // in the instructions being checked and this keeps the nested conditionals
5807 // to a minimum.
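// e.g. "add x0, x1, :lo12:sym" is accepted below, while a bare symbol as in
// "add w0, w1, borked" hits the "invalid immediate expression" error.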
5808 switch (Inst.getOpcode()) {
5809 case AArch64::ADDSWri:
5810 case AArch64::ADDSXri:
5811 case AArch64::ADDWri:
5812 case AArch64::ADDXri:
5813 case AArch64::SUBSWri:
5814 case AArch64::SUBSXri:
5815 case AArch64::SUBWri:
5816 case AArch64::SUBXri: {
5817 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5818 // some slight duplication here.
5819 if (Inst.getOperand(2).isExpr()) {
5820 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5821 AArch64MCExpr::VariantKind ELFRefKind;
5822 MCSymbolRefExpr::VariantKind DarwinRefKind;
5823 int64_t Addend;
5824 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5825
5826 // Only allow these with ADDXri.
5827 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5828 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5829 Inst.getOpcode() == AArch64::ADDXri)
5830 return false;
5831
5832 // Only allow these with ADDXri/ADDWri
5833 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5834 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
5835 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5836 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5837 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5838 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5839 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5840 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5841 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5843 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5844 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5845 (Inst.getOpcode() == AArch64::ADDXri ||
5846 Inst.getOpcode() == AArch64::ADDWri))
5847 return false;
5848
5849 // Otherwise, don't allow symbol refs in the immediate field.
5850 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5851 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5852 // 'cmp w0, 'borked')
5853 return Error(Loc.back(), "invalid immediate expression");
5854 }
5855 // We don't validate more complex expressions here
5856 }
5857 return false;
5858 }
5859 default:
5860 return false;
5861 }
5862}
5863
5864static std::string AArch64MnemonicSpellCheck(StringRef S,
5865 const FeatureBitset &FBS,
5866 unsigned VariantID = 0);
5867
5868bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5869 uint64_t ErrorInfo,
5870 OperandVector &Operands) {
5871 switch (ErrCode) {
5872 case Match_InvalidTiedOperand: {
5873 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5874 if (Op.isVectorList())
5875 return Error(Loc, "operand must match destination register list");
5876
5877 assert(Op.isReg() && "Unexpected operand type");
5878 switch (Op.getRegEqualityTy()) {
5879 case RegConstraintEqualityTy::EqualsSubReg:
5880 return Error(Loc, "operand must be 64-bit form of destination register");
5881 case RegConstraintEqualityTy::EqualsSuperReg:
5882 return Error(Loc, "operand must be 32-bit form of destination register");
5883 case RegConstraintEqualityTy::EqualsReg:
5884 return Error(Loc, "operand must match destination register");
5885 }
5886 llvm_unreachable("Unknown RegConstraintEqualityTy");
5887 }
5888 case Match_MissingFeature:
5889 return Error(Loc,
5890 "instruction requires a CPU feature not currently enabled");
5891 case Match_InvalidOperand:
5892 return Error(Loc, "invalid operand for instruction");
5893 case Match_InvalidSuffix:
5894 return Error(Loc, "invalid type suffix for instruction");
5895 case Match_InvalidCondCode:
5896 return Error(Loc, "expected AArch64 condition code");
5897 case Match_AddSubRegExtendSmall:
5898 return Error(Loc,
5899 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5900 case Match_AddSubRegExtendLarge:
5901 return Error(Loc,
5902 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5903 case Match_AddSubSecondSource:
5904 return Error(Loc,
5905 "expected compatible register, symbol or integer in range [0, 4095]");
5906 case Match_LogicalSecondSource:
5907 return Error(Loc, "expected compatible register or logical immediate");
5908 case Match_InvalidMovImm32Shift:
5909 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5910 case Match_InvalidMovImm64Shift:
5911 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5912 case Match_AddSubRegShift32:
5913 return Error(Loc,
5914 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5915 case Match_AddSubRegShift64:
5916 return Error(Loc,
5917 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5918 case Match_InvalidFPImm:
5919 return Error(Loc,
5920 "expected compatible register or floating-point constant");
5921 case Match_InvalidMemoryIndexedSImm6:
5922 return Error(Loc, "index must be an integer in range [-32, 31].");
5923 case Match_InvalidMemoryIndexedSImm5:
5924 return Error(Loc, "index must be an integer in range [-16, 15].");
5925 case Match_InvalidMemoryIndexed1SImm4:
5926 return Error(Loc, "index must be an integer in range [-8, 7].");
5927 case Match_InvalidMemoryIndexed2SImm4:
5928 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5929 case Match_InvalidMemoryIndexed3SImm4:
5930 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5931 case Match_InvalidMemoryIndexed4SImm4:
5932 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5933 case Match_InvalidMemoryIndexed16SImm4:
5934 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5935 case Match_InvalidMemoryIndexed32SImm4:
5936 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5937 case Match_InvalidMemoryIndexed1SImm6:
5938 return Error(Loc, "index must be an integer in range [-32, 31].");
5939 case Match_InvalidMemoryIndexedSImm8:
5940 return Error(Loc, "index must be an integer in range [-128, 127].");
5941 case Match_InvalidMemoryIndexedSImm9:
5942 return Error(Loc, "index must be an integer in range [-256, 255].");
5943 case Match_InvalidMemoryIndexed16SImm9:
5944 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5945 case Match_InvalidMemoryIndexed8SImm10:
5946 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5947 case Match_InvalidMemoryIndexed4SImm7:
5948 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5949 case Match_InvalidMemoryIndexed8SImm7:
5950 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5951 case Match_InvalidMemoryIndexed16SImm7:
5952 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5953 case Match_InvalidMemoryIndexed8UImm5:
5954 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5955 case Match_InvalidMemoryIndexed8UImm3:
5956 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5957 case Match_InvalidMemoryIndexed4UImm5:
5958 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5959 case Match_InvalidMemoryIndexed2UImm5:
5960 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5961 case Match_InvalidMemoryIndexed8UImm6:
5962 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5963 case Match_InvalidMemoryIndexed16UImm6:
5964 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5965 case Match_InvalidMemoryIndexed4UImm6:
5966 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5967 case Match_InvalidMemoryIndexed2UImm6:
5968 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5969 case Match_InvalidMemoryIndexed1UImm6:
5970 return Error(Loc, "index must be in range [0, 63].");
5971 case Match_InvalidMemoryWExtend8:
5972 return Error(Loc,
5973 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5974 case Match_InvalidMemoryWExtend16:
5975 return Error(Loc,
5976 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5977 case Match_InvalidMemoryWExtend32:
5978 return Error(Loc,
5979 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5980 case Match_InvalidMemoryWExtend64:
5981 return Error(Loc,
5982 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5983 case Match_InvalidMemoryWExtend128:
5984 return Error(Loc,
5985 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5986 case Match_InvalidMemoryXExtend8:
5987 return Error(Loc,
5988 "expected 'lsl' or 'sxtx' with optional shift of #0");
5989 case Match_InvalidMemoryXExtend16:
5990 return Error(Loc,
5991 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5992 case Match_InvalidMemoryXExtend32:
5993 return Error(Loc,
5994 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5995 case Match_InvalidMemoryXExtend64:
5996 return Error(Loc,
5997 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5998 case Match_InvalidMemoryXExtend128:
5999 return Error(Loc,
6000 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6001 case Match_InvalidMemoryIndexed1:
6002 return Error(Loc, "index must be an integer in range [0, 4095].");
6003 case Match_InvalidMemoryIndexed2:
6004 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6005 case Match_InvalidMemoryIndexed4:
6006 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6007 case Match_InvalidMemoryIndexed8:
6008 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6009 case Match_InvalidMemoryIndexed16:
6010 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6011 case Match_InvalidImm0_0:
6012 return Error(Loc, "immediate must be 0.");
6013 case Match_InvalidImm0_1:
6014 return Error(Loc, "immediate must be an integer in range [0, 1].");
6015 case Match_InvalidImm0_3:
6016 return Error(Loc, "immediate must be an integer in range [0, 3].");
6017 case Match_InvalidImm0_7:
6018 return Error(Loc, "immediate must be an integer in range [0, 7].");
6019 case Match_InvalidImm0_15:
6020 return Error(Loc, "immediate must be an integer in range [0, 15].");
6021 case Match_InvalidImm0_31:
6022 return Error(Loc, "immediate must be an integer in range [0, 31].");
6023 case Match_InvalidImm0_63:
6024 return Error(Loc, "immediate must be an integer in range [0, 63].");
6025 case Match_InvalidImm0_127:
6026 return Error(Loc, "immediate must be an integer in range [0, 127].");
6027 case Match_InvalidImm0_255:
6028 return Error(Loc, "immediate must be an integer in range [0, 255].");
6029 case Match_InvalidImm0_65535:
6030 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6031 case Match_InvalidImm1_8:
6032 return Error(Loc, "immediate must be an integer in range [1, 8].");
6033 case Match_InvalidImm1_16:
6034 return Error(Loc, "immediate must be an integer in range [1, 16].");
6035 case Match_InvalidImm1_32:
6036 return Error(Loc, "immediate must be an integer in range [1, 32].");
6037 case Match_InvalidImm1_64:
6038 return Error(Loc, "immediate must be an integer in range [1, 64].");
6039 case Match_InvalidImmM1_62:
6040 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6041 case Match_InvalidMemoryIndexedRange2UImm0:
6042 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6043 case Match_InvalidMemoryIndexedRange2UImm1:
6044 return Error(Loc, "vector select offset must be an immediate range of the "
6045 "form <immf>:<imml>, where the first "
6046 "immediate is a multiple of 2 in the range [0, 2], and "
6047 "the second immediate is immf + 1.");
6048 case Match_InvalidMemoryIndexedRange2UImm2:
6049 case Match_InvalidMemoryIndexedRange2UImm3:
6050 return Error(
6051 Loc,
6052 "vector select offset must be an immediate range of the form "
6053 "<immf>:<imml>, "
6054 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6055 "[0, 14] "
6056 "depending on the instruction, and the second immediate is immf + 1.");
6057 case Match_InvalidMemoryIndexedRange4UImm0:
6058 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6059 case Match_InvalidMemoryIndexedRange4UImm1:
6060 case Match_InvalidMemoryIndexedRange4UImm2:
6061 return Error(
6062 Loc,
6063 "vector select offset must be an immediate range of the form "
6064 "<immf>:<imml>, "
6065 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6066 "[0, 12] "
6067 "depending on the instruction, and the second immediate is immf + 3.");
6068 case Match_InvalidSVEAddSubImm8:
6069 return Error(Loc, "immediate must be an integer in range [0, 255]"
6070 " with a shift amount of 0");
6071 case Match_InvalidSVEAddSubImm16:
6072 case Match_InvalidSVEAddSubImm32:
6073 case Match_InvalidSVEAddSubImm64:
6074 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6075 "multiple of 256 in range [256, 65280]");
6076 case Match_InvalidSVECpyImm8:
6077 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6078 " with a shift amount of 0");
6079 case Match_InvalidSVECpyImm16:
6080 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6081 "multiple of 256 in range [-32768, 65280]");
6082 case Match_InvalidSVECpyImm32:
6083 case Match_InvalidSVECpyImm64:
6084 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6085 "multiple of 256 in range [-32768, 32512]");
6086 case Match_InvalidIndexRange0_0:
6087 return Error(Loc, "expected lane specifier '[0]'");
6088 case Match_InvalidIndexRange1_1:
6089 return Error(Loc, "expected lane specifier '[1]'");
6090 case Match_InvalidIndexRange0_15:
6091 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6092 case Match_InvalidIndexRange0_7:
6093 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6094 case Match_InvalidIndexRange0_3:
6095 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6096 case Match_InvalidIndexRange0_1:
6097 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6098 case Match_InvalidSVEIndexRange0_63:
6099 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6100 case Match_InvalidSVEIndexRange0_31:
6101 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6102 case Match_InvalidSVEIndexRange0_15:
6103 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6104 case Match_InvalidSVEIndexRange0_7:
6105 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6106 case Match_InvalidSVEIndexRange0_3:
6107 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6108 case Match_InvalidLabel:
6109 return Error(Loc, "expected label or encodable integer pc offset");
6110 case Match_MRS:
6111 return Error(Loc, "expected readable system register");
6112 case Match_MSR:
6113 case Match_InvalidSVCR:
6114 return Error(Loc, "expected writable system register or pstate");
6115 case Match_InvalidComplexRotationEven:
6116 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6117 case Match_InvalidComplexRotationOdd:
6118 return Error(Loc, "complex rotation must be 90 or 270.");
6119 case Match_MnemonicFail: {
6120 std::string Suggestion = AArch64MnemonicSpellCheck(
6121 ((AArch64Operand &)*Operands[0]).getToken(),
6122 ComputeAvailableFeatures(STI->getFeatureBits()));
6123 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6124 }
6125 case Match_InvalidGPR64shifted8:
6126 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6127 case Match_InvalidGPR64shifted16:
6128 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6129 case Match_InvalidGPR64shifted32:
6130 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6131 case Match_InvalidGPR64shifted64:
6132 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6133 case Match_InvalidGPR64shifted128:
6134 return Error(
6135 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6136 case Match_InvalidGPR64NoXZRshifted8:
6137 return Error(Loc, "register must be x0..x30 without shift");
6138 case Match_InvalidGPR64NoXZRshifted16:
6139 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6140 case Match_InvalidGPR64NoXZRshifted32:
6141 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6142 case Match_InvalidGPR64NoXZRshifted64:
6143 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6144 case Match_InvalidGPR64NoXZRshifted128:
6145 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6146 case Match_InvalidZPR32UXTW8:
6147 case Match_InvalidZPR32SXTW8:
6148 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6149 case Match_InvalidZPR32UXTW16:
6150 case Match_InvalidZPR32SXTW16:
6151 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6152 case Match_InvalidZPR32UXTW32:
6153 case Match_InvalidZPR32SXTW32:
6154 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6155 case Match_InvalidZPR32UXTW64:
6156 case Match_InvalidZPR32SXTW64:
6157 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6158 case Match_InvalidZPR64UXTW8:
6159 case Match_InvalidZPR64SXTW8:
6160 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6161 case Match_InvalidZPR64UXTW16:
6162 case Match_InvalidZPR64SXTW16:
6163 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6164 case Match_InvalidZPR64UXTW32:
6165 case Match_InvalidZPR64SXTW32:
6166 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6167 case Match_InvalidZPR64UXTW64:
6168 case Match_InvalidZPR64SXTW64:
6169 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6170 case Match_InvalidZPR32LSL8:
6171 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6172 case Match_InvalidZPR32LSL16:
6173 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6174 case Match_InvalidZPR32LSL32:
6175 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6176 case Match_InvalidZPR32LSL64:
6177 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6178 case Match_InvalidZPR64LSL8:
6179 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6180 case Match_InvalidZPR64LSL16:
6181 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6182 case Match_InvalidZPR64LSL32:
6183 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6184 case Match_InvalidZPR64LSL64:
6185 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6186 case Match_InvalidZPR0:
6187 return Error(Loc, "expected register without element width suffix");
6188 case Match_InvalidZPR8:
6189 case Match_InvalidZPR16:
6190 case Match_InvalidZPR32:
6191 case Match_InvalidZPR64:
6192 case Match_InvalidZPR128:
6193 return Error(Loc, "invalid element width");
6194 case Match_InvalidZPR_3b8:
6195 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6196 case Match_InvalidZPR_3b16:
6197 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6198 case Match_InvalidZPR_3b32:
6199 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6200 case Match_InvalidZPR_4b8:
6201 return Error(Loc,
6202 "Invalid restricted vector register, expected z0.b..z15.b");
6203 case Match_InvalidZPR_4b16:
6204 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6205 case Match_InvalidZPR_4b32:
6206 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6207 case Match_InvalidZPR_4b64:
6208 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6209 case Match_InvalidZPRMul2_Lo8:
6210 return Error(Loc, "Invalid restricted vector register, expected even "
6211 "register in z0.b..z14.b");
6212 case Match_InvalidZPRMul2_Hi8:
6213 return Error(Loc, "Invalid restricted vector register, expected even "
6214 "register in z16.b..z30.b");
6215 case Match_InvalidZPRMul2_Lo16:
6216 return Error(Loc, "Invalid restricted vector register, expected even "
6217 "register in z0.h..z14.h");
6218 case Match_InvalidZPRMul2_Hi16:
6219 return Error(Loc, "Invalid restricted vector register, expected even "
6220 "register in z16.h..z30.h");
6221 case Match_InvalidZPRMul2_Lo32:
6222 return Error(Loc, "Invalid restricted vector register, expected even "
6223 "register in z0.s..z14.s");
6224 case Match_InvalidZPRMul2_Hi32:
6225 return Error(Loc, "Invalid restricted vector register, expected even "
6226 "register in z16.s..z30.s");
6227 case Match_InvalidZPRMul2_Lo64:
6228 return Error(Loc, "Invalid restricted vector register, expected even "
6229 "register in z0.d..z14.d");
6230 case Match_InvalidZPRMul2_Hi64:
6231 return Error(Loc, "Invalid restricted vector register, expected even "
6232 "register in z16.d..z30.d");
6233 case Match_InvalidZPR_K0:
6234 return Error(Loc, "invalid restricted vector register, expected register "
6235 "in z20..z23 or z28..z31");
6236 case Match_InvalidSVEPattern:
6237 return Error(Loc, "invalid predicate pattern");
6238 case Match_InvalidSVEPPRorPNRAnyReg:
6239 case Match_InvalidSVEPPRorPNRBReg:
6240 case Match_InvalidSVEPredicateAnyReg:
6241 case Match_InvalidSVEPredicateBReg:
6242 case Match_InvalidSVEPredicateHReg:
6243 case Match_InvalidSVEPredicateSReg:
6244 case Match_InvalidSVEPredicateDReg:
6245 return Error(Loc, "invalid predicate register.");
6246 case Match_InvalidSVEPredicate3bAnyReg:
6247 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6248 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6249 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6250 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6251 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6252 return Error(Loc, "Invalid predicate register, expected PN in range "
6253 "pn8..pn15 with element suffix.");
6254 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6255 return Error(Loc, "invalid restricted predicate-as-counter register "
6256 "expected pn8..pn15");
6257 case Match_InvalidSVEPNPredicateBReg:
6258 case Match_InvalidSVEPNPredicateHReg:
6259 case Match_InvalidSVEPNPredicateSReg:
6260 case Match_InvalidSVEPNPredicateDReg:
6261 return Error(Loc, "Invalid predicate register, expected PN in range "
6262 "pn0..pn15 with element suffix.");
6263 case Match_InvalidSVEVecLenSpecifier:
6264 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6265 case Match_InvalidSVEPredicateListMul2x8:
6266 case Match_InvalidSVEPredicateListMul2x16:
6267 case Match_InvalidSVEPredicateListMul2x32:
6268 case Match_InvalidSVEPredicateListMul2x64:
6269 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6270 "predicate registers, where the first vector is a multiple of 2 "
6271 "and with correct element type");
6272 case Match_InvalidSVEExactFPImmOperandHalfOne:
6273 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6274 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6275 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6276 case Match_InvalidSVEExactFPImmOperandZeroOne:
6277 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6278 case Match_InvalidMatrixTileVectorH8:
6279 case Match_InvalidMatrixTileVectorV8:
6280 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6281 case Match_InvalidMatrixTileVectorH16:
6282 case Match_InvalidMatrixTileVectorV16:
6283 return Error(Loc,
6284 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6285 case Match_InvalidMatrixTileVectorH32:
6286 case Match_InvalidMatrixTileVectorV32:
6287 return Error(Loc,
6288 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6289 case Match_InvalidMatrixTileVectorH64:
6290 case Match_InvalidMatrixTileVectorV64:
6291 return Error(Loc,
6292 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6293 case Match_InvalidMatrixTileVectorH128:
6294 case Match_InvalidMatrixTileVectorV128:
6295 return Error(Loc,
6296 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6297 case Match_InvalidMatrixTile16:
6298 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6299 case Match_InvalidMatrixTile32:
6300 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6301 case Match_InvalidMatrixTile64:
6302 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6303 case Match_InvalidMatrix:
6304 return Error(Loc, "invalid matrix operand, expected za");
6305 case Match_InvalidMatrix8:
6306 return Error(Loc, "invalid matrix operand, expected suffix .b");
6307 case Match_InvalidMatrix16:
6308 return Error(Loc, "invalid matrix operand, expected suffix .h");
6309 case Match_InvalidMatrix32:
6310 return Error(Loc, "invalid matrix operand, expected suffix .s");
6311 case Match_InvalidMatrix64:
6312 return Error(Loc, "invalid matrix operand, expected suffix .d");
6313 case Match_InvalidMatrixIndexGPR32_12_15:
6314 return Error(Loc, "operand must be a register in range [w12, w15]");
6315 case Match_InvalidMatrixIndexGPR32_8_11:
6316 return Error(Loc, "operand must be a register in range [w8, w11]");
6317 case Match_InvalidSVEVectorList2x8Mul2:
6318 case Match_InvalidSVEVectorList2x16Mul2:
6319 case Match_InvalidSVEVectorList2x32Mul2:
6320 case Match_InvalidSVEVectorList2x64Mul2:
6321 case Match_InvalidSVEVectorList2x128Mul2:
6322 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6323 "SVE vectors, where the first vector is a multiple of 2 "
6324 "and with matching element types");
6325 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6326 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6327 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6328 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6329 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6330 "SVE vectors in the range z0-z14, where the first vector "
6331 "is a multiple of 2 "
6332 "and with matching element types");
6333 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6334 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6335 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6336 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6337 return Error(Loc,
6338 "Invalid vector list, expected list with 2 consecutive "
6339 "SVE vectors in the range z16-z30, where the first vector "
6340 "is a multiple of 2 "
6341 "and with matching element types");
6342 case Match_InvalidSVEVectorList4x8Mul4:
6343 case Match_InvalidSVEVectorList4x16Mul4:
6344 case Match_InvalidSVEVectorList4x32Mul4:
6345 case Match_InvalidSVEVectorList4x64Mul4:
6346 case Match_InvalidSVEVectorList4x128Mul4:
6347 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6348 "SVE vectors, where the first vector is a multiple of 4 "
6349 "and with matching element types");
6350 case Match_InvalidLookupTable:
6351 return Error(Loc, "Invalid lookup table, expected zt0");
6352 case Match_InvalidSVEVectorListStrided2x8:
6353 case Match_InvalidSVEVectorListStrided2x16:
6354 case Match_InvalidSVEVectorListStrided2x32:
6355 case Match_InvalidSVEVectorListStrided2x64:
6356 return Error(
6357 Loc,
6358 "Invalid vector list, expected list with each SVE vector in the list "
6359 "8 registers apart, and the first register in the range [z0, z7] or "
6360 "[z16, z23] and with correct element type");
6361 case Match_InvalidSVEVectorListStrided4x8:
6362 case Match_InvalidSVEVectorListStrided4x16:
6363 case Match_InvalidSVEVectorListStrided4x32:
6364 case Match_InvalidSVEVectorListStrided4x64:
6365 return Error(
6366 Loc,
6367 "Invalid vector list, expected list with each SVE vector in the list "
6368 "4 registers apart, and the first register in the range [z0, z3] or "
6369 "[z16, z19] and with correct element type");
6370 case Match_AddSubLSLImm3ShiftLarge:
6371 return Error(Loc,
6372 "expected 'lsl' with optional integer in range [0, 7]");
6373 default:
6374 llvm_unreachable("unexpected error code!");
6375 }
6376}
6377
6378static const char *getSubtargetFeatureName(uint64_t Val);
6379
6380bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6381 OperandVector &Operands,
6382 MCStreamer &Out,
6383 uint64_t &ErrorInfo,
6384 bool MatchingInlineAsm) {
6385 assert(!Operands.empty() && "Unexpected empty operand list!");
6386 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6387 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6388
6389 StringRef Tok = Op.getToken();
6390 unsigned NumOperands = Operands.size();
6391
6392 if (NumOperands == 4 && Tok == "lsl") {
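// "lsl Rd, Rn, #imm" is an alias of UBFM; e.g. "lsl x0, x1, #4" is rewritten
// here to "ubfm x0, x1, #60, #59" (illustrative).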
6393 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6394 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6395 if (Op2.isScalarReg() && Op3.isImm()) {
6396 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6397 if (Op3CE) {
6398 uint64_t Op3Val = Op3CE->getValue();
6399 uint64_t NewOp3Val = 0;
6400 uint64_t NewOp4Val = 0;
6401 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6402 Op2.getReg())) {
6403 NewOp3Val = (32 - Op3Val) & 0x1f;
6404 NewOp4Val = 31 - Op3Val;
6405 } else {
6406 NewOp3Val = (64 - Op3Val) & 0x3f;
6407 NewOp4Val = 63 - Op3Val;
6408 }
6409
6410 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6411 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6412
6413 Operands[0] =
6414 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6415 Operands.push_back(AArch64Operand::CreateImm(
6416 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6417 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6418 Op3.getEndLoc(), getContext());
6419 }
6420 }
6421 } else if (NumOperands == 4 && Tok == "bfc") {
6422 // FIXME: Horrible hack to handle BFC->BFM alias.
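// e.g. "bfc w0, #3, #4" becomes "bfm w0, wzr, #29, #3" (illustrative).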
6423 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6424 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6425 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6426
6427 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6428 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6429 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6430
6431 if (LSBCE && WidthCE) {
6432 uint64_t LSB = LSBCE->getValue();
6433 uint64_t Width = WidthCE->getValue();
6434
6435 uint64_t RegWidth = 0;
6436 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6437 Op1.getReg()))
6438 RegWidth = 64;
6439 else
6440 RegWidth = 32;
6441
6442 if (LSB >= RegWidth)
6443 return Error(LSBOp.getStartLoc(),
6444 "expected integer in range [0, 31]");
6445 if (Width < 1 || Width > RegWidth)
6446 return Error(WidthOp.getStartLoc(),
6447 "expected integer in range [1, 32]");
6448
6449 uint64_t ImmR = 0;
6450 if (RegWidth == 32)
6451 ImmR = (32 - LSB) & 0x1f;
6452 else
6453 ImmR = (64 - LSB) & 0x3f;
6454
6455 uint64_t ImmS = Width - 1;
6456
6457 if (ImmR != 0 && ImmS >= ImmR)
6458 return Error(WidthOp.getStartLoc(),
6459 "requested insert overflows register");
6460
6461 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6462 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6463 Operands[0] =
6464 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6465 Operands[2] = AArch64Operand::CreateReg(
6466 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6467 SMLoc(), SMLoc(), getContext());
6468 Operands[3] = AArch64Operand::CreateImm(
6469 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6470 Operands.emplace_back(
6471 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6472 WidthOp.getEndLoc(), getContext()));
6473 }
6474 }
6475 } else if (NumOperands == 5) {
6476 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6477 // UBFIZ -> UBFM aliases.
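// Illustrative example: "bfi x0, x1, #8, #16" is rewritten to
// "bfm x0, x1, #56, #15" ((64 - 8) & 0x3f == 56, 16 - 1 == 15).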
6478 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6479 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6480 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6481 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6482
6483 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6484 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6485 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6486
6487 if (Op3CE && Op4CE) {
6488 uint64_t Op3Val = Op3CE->getValue();
6489 uint64_t Op4Val = Op4CE->getValue();
6490
6491 uint64_t RegWidth = 0;
6492 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6493 Op1.getReg()))
6494 RegWidth = 64;
6495 else
6496 RegWidth = 32;
6497
6498 if (Op3Val >= RegWidth)
6499 return Error(Op3.getStartLoc(),
6500 "expected integer in range [0, 31]");
6501 if (Op4Val < 1 || Op4Val > RegWidth)
6502 return Error(Op4.getStartLoc(),
6503 "expected integer in range [1, 32]");
6504
6505 uint64_t NewOp3Val = 0;
6506 if (RegWidth == 32)
6507 NewOp3Val = (32 - Op3Val) & 0x1f;
6508 else
6509 NewOp3Val = (64 - Op3Val) & 0x3f;
6510
6511 uint64_t NewOp4Val = Op4Val - 1;
6512
6513 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6514 return Error(Op4.getStartLoc(),
6515 "requested insert overflows register");
6516
6517 const MCExpr *NewOp3 =
6518 MCConstantExpr::create(NewOp3Val, getContext());
6519 const MCExpr *NewOp4 =
6520 MCConstantExpr::create(NewOp4Val, getContext());
6521 Operands[3] = AArch64Operand::CreateImm(
6522 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6523 Operands[4] = AArch64Operand::CreateImm(
6524 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6525 if (Tok == "bfi")
6526 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6527 getContext());
6528 else if (Tok == "sbfiz")
6529 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6530 getContext());
6531 else if (Tok == "ubfiz")
6532 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6533 getContext());
6534 else
6535 llvm_unreachable("No valid mnemonic for alias?");
6536 }
6537 }
6538
6539 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6540 // UBFX -> UBFM aliases.
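// Illustrative example: "ubfx w0, w1, #4, #8" is rewritten to
// "ubfm w0, w1, #4, #11"; only the width operand changes, to lsb + width - 1.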
6541 } else if (NumOperands == 5 &&
6542 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6543 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6544 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6545 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6546
6547 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6548 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6549 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6550
6551 if (Op3CE && Op4CE) {
6552 uint64_t Op3Val = Op3CE->getValue();
6553 uint64_t Op4Val = Op4CE->getValue();
6554
6555 uint64_t RegWidth = 0;
6556 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6557 Op1.getReg()))
6558 RegWidth = 64;
6559 else
6560 RegWidth = 32;
6561
6562 if (Op3Val >= RegWidth)
6563 return Error(Op3.getStartLoc(),
6564 "expected integer in range [0, 31]");
6565 if (Op4Val < 1 || Op4Val > RegWidth)
6566 return Error(Op4.getStartLoc(),
6567 "expected integer in range [1, 32]");
6568
6569 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6570
6571 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6572 return Error(Op4.getStartLoc(),
6573 "requested extract overflows register");
6574
6575 const MCExpr *NewOp4 =
6576 MCConstantExpr::create(NewOp4Val, getContext());
6577 Operands[4] = AArch64Operand::CreateImm(
6578 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6579 if (Tok == "bfxil")
6580 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6581 getContext());
6582 else if (Tok == "sbfx")
6583 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6584 getContext());
6585 else if (Tok == "ubfx")
6586 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6587 getContext());
6588 else
6589 llvm_unreachable("No valid mnemonic for alias?");
6590 }
6591 }
6592 }
6593 }
6594
6595 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6596 // instruction for FP registers correctly in some rare circumstances. Convert
6597 // it to a safe instruction and warn (because silently changing someone's
6598 // assembly is rude).
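// Illustrative example: on an affected CPU, "movi.2d v0, #0" is emitted as
// "movi.16b v0, #0", which zeroes the same 128 bits.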
6599 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6600 NumOperands == 4 && Tok == "movi") {
6601 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6602 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6603 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6604 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6605 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6606 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6607 if (Suffix.lower() == ".2d" &&
6608 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6609 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6610 " correctly on this CPU, converting to equivalent movi.16b");
6611 // Switch the suffix to .16b.
6612 unsigned Idx = Op1.isToken() ? 1 : 2;
6613 Operands[Idx] =
6614 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6615 }
6616 }
6617 }
6618
6619 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6620 // InstAlias can't quite handle this since the reg classes aren't
6621 // subclasses.
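// Illustrative example: for "sxtw x0, w1", the w1 operand is rewritten to x1
// below so that the GPR64-based matcher patterns apply.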
6622 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6623 // The source register can be Wn here, but the matcher expects a
6624 // GPR64. Twiddle it here if necessary.
6625 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6626 if (Op.isScalarReg()) {
6627 MCRegister Reg = getXRegFromWReg(Op.getReg());
6628 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6629 Op.getStartLoc(), Op.getEndLoc(),
6630 getContext());
6631 }
6632 }
6633 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6634 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6635 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6636 if (Op.isScalarReg() &&
6637 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6638 Op.getReg())) {
6639 // The source register can be Wn here, but the matcher expects a
6640 // GPR64. Twiddle it here if necessary.
6641 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6642 if (Op.isScalarReg()) {
6643 MCRegister Reg = getXRegFromWReg(Op.getReg());
6644 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6645 Op.getStartLoc(),
6646 Op.getEndLoc(), getContext());
6647 }
6648 }
6649 }
6650 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6651 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6652 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6653 if (Op.isScalarReg() &&
6654 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6655 Op.getReg())) {
6656 // The source register can be Wn here, but the matcher expects a
6657 // GPR32. Twiddle it here if necessary.
6658 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6659 if (Op.isScalarReg()) {
6660 MCRegister Reg = getWRegFromXReg(Op.getReg());
6661 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6662 Op.getStartLoc(),
6663 Op.getEndLoc(), getContext());
6664 }
6665 }
6666 }
6667
6668 MCInst Inst;
6669 FeatureBitset MissingFeatures;
6670 // First try to match against the secondary set of tables containing the
6671 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6672 unsigned MatchResult =
6673 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6674 MatchingInlineAsm, 1);
6675
6676 // If that fails, try against the alternate table containing long-form NEON:
6677 // "fadd v0.2s, v1.2s, v2.2s"
6678 if (MatchResult != Match_Success) {
6679 // But first, save the short-form match result: we can use it in case the
6680 // long-form match also fails.
6681 auto ShortFormNEONErrorInfo = ErrorInfo;
6682 auto ShortFormNEONMatchResult = MatchResult;
6683 auto ShortFormNEONMissingFeatures = MissingFeatures;
6684
6685 MatchResult =
6686 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6687 MatchingInlineAsm, 0);
6688
6689 // Now, both matches failed, and the long-form match failed on the mnemonic
6690 // suffix token operand. The short-form match failure is probably more
6691 // relevant: use it instead.
6692 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6693 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6694 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6695 MatchResult = ShortFormNEONMatchResult;
6696 ErrorInfo = ShortFormNEONErrorInfo;
6697 MissingFeatures = ShortFormNEONMissingFeatures;
6698 }
6699 }
6700
6701 switch (MatchResult) {
6702 case Match_Success: {
6703 // Perform range checking and other semantic validations
6704 SmallVector<SMLoc, 8> OperandLocs;
6705 NumOperands = Operands.size();
6706 for (unsigned i = 1; i < NumOperands; ++i)
6707 OperandLocs.push_back(Operands[i]->getStartLoc());
6708 if (validateInstruction(Inst, IDLoc, OperandLocs))
6709 return true;
6710
6711 Inst.setLoc(IDLoc);
6712 Out.emitInstruction(Inst, getSTI());
6713 return false;
6714 }
6715 case Match_MissingFeature: {
6716 assert(MissingFeatures.any() && "Unknown missing feature!");
6717 // Special case the error message for the very common case where only
6718 // a single subtarget feature is missing (neon, e.g.).
6719 std::string Msg = "instruction requires:";
6720 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6721 if (MissingFeatures[i]) {
6722 Msg += " ";
6723 Msg += getSubtargetFeatureName(i);
6724 }
6725 }
6726 return Error(IDLoc, Msg);
6727 }
6728 case Match_MnemonicFail:
6729 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6730 case Match_InvalidOperand: {
6731 SMLoc ErrorLoc = IDLoc;
6732
6733 if (ErrorInfo != ~0ULL) {
6734 if (ErrorInfo >= Operands.size())
6735 return Error(IDLoc, "too few operands for instruction",
6736 SMRange(IDLoc, getTok().getLoc()));
6737
6738 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6739 if (ErrorLoc == SMLoc())
6740 ErrorLoc = IDLoc;
6741 }
6742 // If the match failed on a suffix token operand, tweak the diagnostic
6743 // accordingly.
6744 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6745 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6746 MatchResult = Match_InvalidSuffix;
6747
6748 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6749 }
6750 case Match_InvalidTiedOperand:
6751 case Match_InvalidMemoryIndexed1:
6752 case Match_InvalidMemoryIndexed2:
6753 case Match_InvalidMemoryIndexed4:
6754 case Match_InvalidMemoryIndexed8:
6755 case Match_InvalidMemoryIndexed16:
6756 case Match_InvalidCondCode:
6757 case Match_AddSubLSLImm3ShiftLarge:
6758 case Match_AddSubRegExtendSmall:
6759 case Match_AddSubRegExtendLarge:
6760 case Match_AddSubSecondSource:
6761 case Match_LogicalSecondSource:
6762 case Match_AddSubRegShift32:
6763 case Match_AddSubRegShift64:
6764 case Match_InvalidMovImm32Shift:
6765 case Match_InvalidMovImm64Shift:
6766 case Match_InvalidFPImm:
6767 case Match_InvalidMemoryWExtend8:
6768 case Match_InvalidMemoryWExtend16:
6769 case Match_InvalidMemoryWExtend32:
6770 case Match_InvalidMemoryWExtend64:
6771 case Match_InvalidMemoryWExtend128:
6772 case Match_InvalidMemoryXExtend8:
6773 case Match_InvalidMemoryXExtend16:
6774 case Match_InvalidMemoryXExtend32:
6775 case Match_InvalidMemoryXExtend64:
6776 case Match_InvalidMemoryXExtend128:
6777 case Match_InvalidMemoryIndexed1SImm4:
6778 case Match_InvalidMemoryIndexed2SImm4:
6779 case Match_InvalidMemoryIndexed3SImm4:
6780 case Match_InvalidMemoryIndexed4SImm4:
6781 case Match_InvalidMemoryIndexed1SImm6:
6782 case Match_InvalidMemoryIndexed16SImm4:
6783 case Match_InvalidMemoryIndexed32SImm4:
6784 case Match_InvalidMemoryIndexed4SImm7:
6785 case Match_InvalidMemoryIndexed8SImm7:
6786 case Match_InvalidMemoryIndexed16SImm7:
6787 case Match_InvalidMemoryIndexed8UImm5:
6788 case Match_InvalidMemoryIndexed8UImm3:
6789 case Match_InvalidMemoryIndexed4UImm5:
6790 case Match_InvalidMemoryIndexed2UImm5:
6791 case Match_InvalidMemoryIndexed1UImm6:
6792 case Match_InvalidMemoryIndexed2UImm6:
6793 case Match_InvalidMemoryIndexed4UImm6:
6794 case Match_InvalidMemoryIndexed8UImm6:
6795 case Match_InvalidMemoryIndexed16UImm6:
6796 case Match_InvalidMemoryIndexedSImm6:
6797 case Match_InvalidMemoryIndexedSImm5:
6798 case Match_InvalidMemoryIndexedSImm8:
6799 case Match_InvalidMemoryIndexedSImm9:
6800 case Match_InvalidMemoryIndexed16SImm9:
6801 case Match_InvalidMemoryIndexed8SImm10:
6802 case Match_InvalidImm0_0:
6803 case Match_InvalidImm0_1:
6804 case Match_InvalidImm0_3:
6805 case Match_InvalidImm0_7:
6806 case Match_InvalidImm0_15:
6807 case Match_InvalidImm0_31:
6808 case Match_InvalidImm0_63:
6809 case Match_InvalidImm0_127:
6810 case Match_InvalidImm0_255:
6811 case Match_InvalidImm0_65535:
6812 case Match_InvalidImm1_8:
6813 case Match_InvalidImm1_16:
6814 case Match_InvalidImm1_32:
6815 case Match_InvalidImm1_64:
6816 case Match_InvalidImmM1_62:
6817 case Match_InvalidMemoryIndexedRange2UImm0:
6818 case Match_InvalidMemoryIndexedRange2UImm1:
6819 case Match_InvalidMemoryIndexedRange2UImm2:
6820 case Match_InvalidMemoryIndexedRange2UImm3:
6821 case Match_InvalidMemoryIndexedRange4UImm0:
6822 case Match_InvalidMemoryIndexedRange4UImm1:
6823 case Match_InvalidMemoryIndexedRange4UImm2:
6824 case Match_InvalidSVEAddSubImm8:
6825 case Match_InvalidSVEAddSubImm16:
6826 case Match_InvalidSVEAddSubImm32:
6827 case Match_InvalidSVEAddSubImm64:
6828 case Match_InvalidSVECpyImm8:
6829 case Match_InvalidSVECpyImm16:
6830 case Match_InvalidSVECpyImm32:
6831 case Match_InvalidSVECpyImm64:
6832 case Match_InvalidIndexRange0_0:
6833 case Match_InvalidIndexRange1_1:
6834 case Match_InvalidIndexRange0_15:
6835 case Match_InvalidIndexRange0_7:
6836 case Match_InvalidIndexRange0_3:
6837 case Match_InvalidIndexRange0_1:
6838 case Match_InvalidSVEIndexRange0_63:
6839 case Match_InvalidSVEIndexRange0_31:
6840 case Match_InvalidSVEIndexRange0_15:
6841 case Match_InvalidSVEIndexRange0_7:
6842 case Match_InvalidSVEIndexRange0_3:
6843 case Match_InvalidLabel:
6844 case Match_InvalidComplexRotationEven:
6845 case Match_InvalidComplexRotationOdd:
6846 case Match_InvalidGPR64shifted8:
6847 case Match_InvalidGPR64shifted16:
6848 case Match_InvalidGPR64shifted32:
6849 case Match_InvalidGPR64shifted64:
6850 case Match_InvalidGPR64shifted128:
6851 case Match_InvalidGPR64NoXZRshifted8:
6852 case Match_InvalidGPR64NoXZRshifted16:
6853 case Match_InvalidGPR64NoXZRshifted32:
6854 case Match_InvalidGPR64NoXZRshifted64:
6855 case Match_InvalidGPR64NoXZRshifted128:
6856 case Match_InvalidZPR32UXTW8:
6857 case Match_InvalidZPR32UXTW16:
6858 case Match_InvalidZPR32UXTW32:
6859 case Match_InvalidZPR32UXTW64:
6860 case Match_InvalidZPR32SXTW8:
6861 case Match_InvalidZPR32SXTW16:
6862 case Match_InvalidZPR32SXTW32:
6863 case Match_InvalidZPR32SXTW64:
6864 case Match_InvalidZPR64UXTW8:
6865 case Match_InvalidZPR64SXTW8:
6866 case Match_InvalidZPR64UXTW16:
6867 case Match_InvalidZPR64SXTW16:
6868 case Match_InvalidZPR64UXTW32:
6869 case Match_InvalidZPR64SXTW32:
6870 case Match_InvalidZPR64UXTW64:
6871 case Match_InvalidZPR64SXTW64:
6872 case Match_InvalidZPR32LSL8:
6873 case Match_InvalidZPR32LSL16:
6874 case Match_InvalidZPR32LSL32:
6875 case Match_InvalidZPR32LSL64:
6876 case Match_InvalidZPR64LSL8:
6877 case Match_InvalidZPR64LSL16:
6878 case Match_InvalidZPR64LSL32:
6879 case Match_InvalidZPR64LSL64:
6880 case Match_InvalidZPR0:
6881 case Match_InvalidZPR8:
6882 case Match_InvalidZPR16:
6883 case Match_InvalidZPR32:
6884 case Match_InvalidZPR64:
6885 case Match_InvalidZPR128:
6886 case Match_InvalidZPR_3b8:
6887 case Match_InvalidZPR_3b16:
6888 case Match_InvalidZPR_3b32:
6889 case Match_InvalidZPR_4b8:
6890 case Match_InvalidZPR_4b16:
6891 case Match_InvalidZPR_4b32:
6892 case Match_InvalidZPR_4b64:
6893 case Match_InvalidSVEPPRorPNRAnyReg:
6894 case Match_InvalidSVEPPRorPNRBReg:
6895 case Match_InvalidSVEPredicateAnyReg:
6896 case Match_InvalidSVEPattern:
6897 case Match_InvalidSVEVecLenSpecifier:
6898 case Match_InvalidSVEPredicateBReg:
6899 case Match_InvalidSVEPredicateHReg:
6900 case Match_InvalidSVEPredicateSReg:
6901 case Match_InvalidSVEPredicateDReg:
6902 case Match_InvalidSVEPredicate3bAnyReg:
6903 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6904 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6905 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6906 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6907 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6908 case Match_InvalidSVEPNPredicateBReg:
6909 case Match_InvalidSVEPNPredicateHReg:
6910 case Match_InvalidSVEPNPredicateSReg:
6911 case Match_InvalidSVEPNPredicateDReg:
6912 case Match_InvalidSVEPredicateListMul2x8:
6913 case Match_InvalidSVEPredicateListMul2x16:
6914 case Match_InvalidSVEPredicateListMul2x32:
6915 case Match_InvalidSVEPredicateListMul2x64:
6916 case Match_InvalidSVEExactFPImmOperandHalfOne:
6917 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6918 case Match_InvalidSVEExactFPImmOperandZeroOne:
6919 case Match_InvalidMatrixTile16:
6920 case Match_InvalidMatrixTile32:
6921 case Match_InvalidMatrixTile64:
6922 case Match_InvalidMatrix:
6923 case Match_InvalidMatrix8:
6924 case Match_InvalidMatrix16:
6925 case Match_InvalidMatrix32:
6926 case Match_InvalidMatrix64:
6927 case Match_InvalidMatrixTileVectorH8:
6928 case Match_InvalidMatrixTileVectorH16:
6929 case Match_InvalidMatrixTileVectorH32:
6930 case Match_InvalidMatrixTileVectorH64:
6931 case Match_InvalidMatrixTileVectorH128:
6932 case Match_InvalidMatrixTileVectorV8:
6933 case Match_InvalidMatrixTileVectorV16:
6934 case Match_InvalidMatrixTileVectorV32:
6935 case Match_InvalidMatrixTileVectorV64:
6936 case Match_InvalidMatrixTileVectorV128:
6937 case Match_InvalidSVCR:
6938 case Match_InvalidMatrixIndexGPR32_12_15:
6939 case Match_InvalidMatrixIndexGPR32_8_11:
6940 case Match_InvalidLookupTable:
6941 case Match_InvalidZPRMul2_Lo8:
6942 case Match_InvalidZPRMul2_Hi8:
6943 case Match_InvalidZPRMul2_Lo16:
6944 case Match_InvalidZPRMul2_Hi16:
6945 case Match_InvalidZPRMul2_Lo32:
6946 case Match_InvalidZPRMul2_Hi32:
6947 case Match_InvalidZPRMul2_Lo64:
6948 case Match_InvalidZPRMul2_Hi64:
6949 case Match_InvalidZPR_K0:
6950 case Match_InvalidSVEVectorList2x8Mul2:
6951 case Match_InvalidSVEVectorList2x16Mul2:
6952 case Match_InvalidSVEVectorList2x32Mul2:
6953 case Match_InvalidSVEVectorList2x64Mul2:
6954 case Match_InvalidSVEVectorList2x128Mul2:
6955 case Match_InvalidSVEVectorList4x8Mul4:
6956 case Match_InvalidSVEVectorList4x16Mul4:
6957 case Match_InvalidSVEVectorList4x32Mul4:
6958 case Match_InvalidSVEVectorList4x64Mul4:
6959 case Match_InvalidSVEVectorList4x128Mul4:
6960 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6961 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6962 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6963 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6964 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6965 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6966 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6967 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6968 case Match_InvalidSVEVectorListStrided2x8:
6969 case Match_InvalidSVEVectorListStrided2x16:
6970 case Match_InvalidSVEVectorListStrided2x32:
6971 case Match_InvalidSVEVectorListStrided2x64:
6972 case Match_InvalidSVEVectorListStrided4x8:
6973 case Match_InvalidSVEVectorListStrided4x16:
6974 case Match_InvalidSVEVectorListStrided4x32:
6975 case Match_InvalidSVEVectorListStrided4x64:
6976 case Match_MSR:
6977 case Match_MRS: {
6978 if (ErrorInfo >= Operands.size())
6979 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6980 // Any time we get here, there's nothing fancy to do. Just get the
6981 // operand SMLoc and display the diagnostic.
6982 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6983 if (ErrorLoc == SMLoc())
6984 ErrorLoc = IDLoc;
6985 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6986 }
6987 }
6988
6989 llvm_unreachable("Implement any new match types added!");
6990}
6991
6992 /// ParseDirective parses the AArch64-specific directives
6993bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
6994 const MCContext::Environment Format = getContext().getObjectFileType();
6995 bool IsMachO = Format == MCContext::IsMachO;
6996 bool IsCOFF = Format == MCContext::IsCOFF;
6997 bool IsELF = Format == MCContext::IsELF;
6998
6999 auto IDVal = DirectiveID.getIdentifier().lower();
7000 SMLoc Loc = DirectiveID.getLoc();
7001 if (IDVal == ".arch")
7002 parseDirectiveArch(Loc);
7003 else if (IDVal == ".cpu")
7004 parseDirectiveCPU(Loc);
7005 else if (IDVal == ".tlsdesccall")
7006 parseDirectiveTLSDescCall(Loc);
7007 else if (IDVal == ".ltorg" || IDVal == ".pool")
7008 parseDirectiveLtorg(Loc);
7009 else if (IDVal == ".unreq")
7010 parseDirectiveUnreq(Loc);
7011 else if (IDVal == ".inst")
7012 parseDirectiveInst(Loc);
7013 else if (IDVal == ".cfi_negate_ra_state")
7014 parseDirectiveCFINegateRAState();
7015 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7016 parseDirectiveCFINegateRAStateWithPC();
7017 else if (IDVal == ".cfi_b_key_frame")
7018 parseDirectiveCFIBKeyFrame();
7019 else if (IDVal == ".cfi_mte_tagged_frame")
7020 parseDirectiveCFIMTETaggedFrame();
7021 else if (IDVal == ".arch_extension")
7022 parseDirectiveArchExtension(Loc);
7023 else if (IDVal == ".variant_pcs")
7024 parseDirectiveVariantPCS(Loc);
7025 else if (IsMachO) {
7026 if (IDVal == MCLOHDirectiveName())
7027 parseDirectiveLOH(IDVal, Loc);
7028 else
7029 return true;
7030 } else if (IsCOFF) {
7031 if (IDVal == ".seh_stackalloc")
7032 parseDirectiveSEHAllocStack(Loc);
7033 else if (IDVal == ".seh_endprologue")
7034 parseDirectiveSEHPrologEnd(Loc);
7035 else if (IDVal == ".seh_save_r19r20_x")
7036 parseDirectiveSEHSaveR19R20X(Loc);
7037 else if (IDVal == ".seh_save_fplr")
7038 parseDirectiveSEHSaveFPLR(Loc);
7039 else if (IDVal == ".seh_save_fplr_x")
7040 parseDirectiveSEHSaveFPLRX(Loc);
7041 else if (IDVal == ".seh_save_reg")
7042 parseDirectiveSEHSaveReg(Loc);
7043 else if (IDVal == ".seh_save_reg_x")
7044 parseDirectiveSEHSaveRegX(Loc);
7045 else if (IDVal == ".seh_save_regp")
7046 parseDirectiveSEHSaveRegP(Loc);
7047 else if (IDVal == ".seh_save_regp_x")
7048 parseDirectiveSEHSaveRegPX(Loc);
7049 else if (IDVal == ".seh_save_lrpair")
7050 parseDirectiveSEHSaveLRPair(Loc);
7051 else if (IDVal == ".seh_save_freg")
7052 parseDirectiveSEHSaveFReg(Loc);
7053 else if (IDVal == ".seh_save_freg_x")
7054 parseDirectiveSEHSaveFRegX(Loc);
7055 else if (IDVal == ".seh_save_fregp")
7056 parseDirectiveSEHSaveFRegP(Loc);
7057 else if (IDVal == ".seh_save_fregp_x")
7058 parseDirectiveSEHSaveFRegPX(Loc);
7059 else if (IDVal == ".seh_set_fp")
7060 parseDirectiveSEHSetFP(Loc);
7061 else if (IDVal == ".seh_add_fp")
7062 parseDirectiveSEHAddFP(Loc);
7063 else if (IDVal == ".seh_nop")
7064 parseDirectiveSEHNop(Loc);
7065 else if (IDVal == ".seh_save_next")
7066 parseDirectiveSEHSaveNext(Loc);
7067 else if (IDVal == ".seh_startepilogue")
7068 parseDirectiveSEHEpilogStart(Loc);
7069 else if (IDVal == ".seh_endepilogue")
7070 parseDirectiveSEHEpilogEnd(Loc);
7071 else if (IDVal == ".seh_trap_frame")
7072 parseDirectiveSEHTrapFrame(Loc);
7073 else if (IDVal == ".seh_pushframe")
7074 parseDirectiveSEHMachineFrame(Loc);
7075 else if (IDVal == ".seh_context")
7076 parseDirectiveSEHContext(Loc);
7077 else if (IDVal == ".seh_ec_context")
7078 parseDirectiveSEHECContext(Loc);
7079 else if (IDVal == ".seh_clear_unwound_to_call")
7080 parseDirectiveSEHClearUnwoundToCall(Loc);
7081 else if (IDVal == ".seh_pac_sign_lr")
7082 parseDirectiveSEHPACSignLR(Loc);
7083 else if (IDVal == ".seh_save_any_reg")
7084 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7085 else if (IDVal == ".seh_save_any_reg_p")
7086 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7087 else if (IDVal == ".seh_save_any_reg_x")
7088 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7089 else if (IDVal == ".seh_save_any_reg_px")
7090 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7091 else
7092 return true;
7093 } else if (IsELF) {
7094 if (IDVal == ".aeabi_subsection")
7095 parseDirectiveAeabiSubSectionHeader(Loc);
7096 else if (IDVal == ".aeabi_attribute")
7097 parseDirectiveAeabiAArch64Attr(Loc);
7098 else
7099 return true;
7100 } else
7101 return true;
7102 return false;
7103}
7104
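// Illustrative examples of the expansion below: ".arch armv8.2-a+crypto"
// enables sha2 and aes, while ".arch armv9-a+crypto" enables sm4, sha3, sha2
// and aes; the "nocrypto" form disables the corresponding set of features.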
7105static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7106 SmallVector<StringRef, 4> &RequestedExtensions) {
7107 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7108 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7109
7110 if (!NoCrypto && Crypto) {
7111 // Map 'generic' (and others) to sha2 and aes, because
7112 // that was the traditional meaning of crypto.
7113 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7114 ArchInfo == AArch64::ARMV8_3A) {
7115 RequestedExtensions.push_back("sha2");
7116 RequestedExtensions.push_back("aes");
7117 }
7118 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7119 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7120 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7121 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7122 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7123 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7124 RequestedExtensions.push_back("sm4");
7125 RequestedExtensions.push_back("sha3");
7126 RequestedExtensions.push_back("sha2");
7127 RequestedExtensions.push_back("aes");
7128 }
7129 } else if (NoCrypto) {
7130 // Map 'generic' (and others) to sha2 and aes, because
7131 // that was the traditional meaning of crypto.
7132 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7133 ArchInfo == AArch64::ARMV8_3A) {
7134 RequestedExtensions.push_back("nosha2");
7135 RequestedExtensions.push_back("noaes");
7136 }
7137 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7138 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7139 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7140 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7141 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7142 ArchInfo == AArch64::ARMV9_4A) {
7143 RequestedExtensions.push_back("nosm4");
7144 RequestedExtensions.push_back("nosha3");
7145 RequestedExtensions.push_back("nosha2");
7146 RequestedExtensions.push_back("noaes");
7147 }
7148 }
7149}
7150
7151 static SMLoc incrementLoc(SMLoc L, int Offset) {
7152 return SMLoc::getFromPointer(L.getPointer() + Offset);
7153}
7154
7155/// parseDirectiveArch
7156/// ::= .arch token
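/// For example (illustrative): ".arch armv8.4-a+crypto" selects the Armv8.4-A
/// feature set and then applies the crypto extension expansion.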
7157bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7158 SMLoc CurLoc = getLoc();
7159
7160 StringRef Arch, ExtensionString;
7161 std::tie(Arch, ExtensionString) =
7162 getParser().parseStringToEndOfStatement().trim().split('+');
7163
7164 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7165 if (!ArchInfo)
7166 return Error(CurLoc, "unknown arch name");
7167
7168 if (parseToken(AsmToken::EndOfStatement))
7169 return true;
7170
7171 // Get the architecture and extension features.
7172 std::vector<StringRef> AArch64Features;
7173 AArch64Features.push_back(ArchInfo->ArchFeature);
7174 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7175
7176 MCSubtargetInfo &STI = copySTI();
7177 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7178 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7179 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7180
7181 SmallVector<StringRef, 4> RequestedExtensions;
7182 if (!ExtensionString.empty())
7183 ExtensionString.split(RequestedExtensions, '+');
7184
7185 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7186 CurLoc = incrementLoc(CurLoc, Arch.size());
7187
7188 for (auto Name : RequestedExtensions) {
7189 // Advance source location past '+'.
7190 CurLoc = incrementLoc(CurLoc, 1);
7191
7192 bool EnableFeature = !Name.consume_front_insensitive("no");
7193
7194 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7195 return Extension.Name == Name;
7196 });
7197
7198 if (It == std::end(ExtensionMap))
7199 return Error(CurLoc, "unsupported architectural extension: " + Name);
7200
7201 if (EnableFeature)
7202 STI.SetFeatureBitsTransitively(It->Features);
7203 else
7204 STI.ClearFeatureBitsTransitively(It->Features);
7205 CurLoc = incrementLoc(CurLoc, Name.size());
7206 }
7207 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7208 setAvailableFeatures(Features);
7209 return false;
7210}
7211
7212/// parseDirectiveArchExtension
7213/// ::= .arch_extension [no]feature
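/// For example (illustrative): ".arch_extension memtag" enables the
/// extension's features and ".arch_extension nomemtag" clears them again.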
7214bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7215 SMLoc ExtLoc = getLoc();
7216
7217 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7218
7219 if (parseEOL())
7220 return true;
7221
7222 bool EnableFeature = true;
7223 if (Name.starts_with_insensitive("no")) {
7224 EnableFeature = false;
7225 Name = Name.substr(2);
7226 }
7227
7228 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7229 return Extension.Name == Name;
7230 });
7231
7232 if (It == std::end(ExtensionMap))
7233 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7234
7235 MCSubtargetInfo &STI = copySTI();
7236 if (EnableFeature)
7237 STI.SetFeatureBitsTransitively(It->Features);
7238 else
7239 STI.ClearFeatureBitsTransitively(It->Features);
7240 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7241 setAvailableFeatures(Features);
7242 return false;
7243}
7244
7245/// parseDirectiveCPU
7246/// ::= .cpu id
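/// For example (illustrative): ".cpu cortex-a75+crypto" starts from the CPU's
/// default features and then applies the requested extensions.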
7247bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7248 SMLoc CurLoc = getLoc();
7249
7250 StringRef CPU, ExtensionString;
7251 std::tie(CPU, ExtensionString) =
7252 getParser().parseStringToEndOfStatement().trim().split('+');
7253
7254 if (parseToken(AsmToken::EndOfStatement))
7255 return true;
7256
7257 SmallVector<StringRef, 4> RequestedExtensions;
7258 if (!ExtensionString.empty())
7259 ExtensionString.split(RequestedExtensions, '+');
7260
7261 const AArch64::ArchInfo *CpuArch = AArch64::getArchForCpu(CPU);
7262 if (!CpuArch) {
7263 Error(CurLoc, "unknown CPU name");
7264 return false;
7265 }
7266 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7267
7268 MCSubtargetInfo &STI = copySTI();
7269 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7270 CurLoc = incrementLoc(CurLoc, CPU.size());
7271
7272 for (auto Name : RequestedExtensions) {
7273 // Advance source location past '+'.
7274 CurLoc = incrementLoc(CurLoc, 1);
7275
7276 bool EnableFeature = !Name.consume_front_insensitive("no");
7277
7278 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7279 return Extension.Name == Name;
7280 });
7281
7282 if (It == std::end(ExtensionMap))
7283 return Error(CurLoc, "unsupported architectural extension: " + Name);
7284
7285 if (EnableFeature)
7286 STI.SetFeatureBitsTransitively(It->Features);
7287 else
7288 STI.ClearFeatureBitsTransitively(It->Features);
7289 CurLoc = incrementLoc(CurLoc, Name.size());
7290 }
7291 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7292 setAvailableFeatures(Features);
7293 return false;
7294}
7295
7296/// parseDirectiveInst
7297/// ::= .inst opcode [, ...]
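/// For example (illustrative): ".inst 0xd503201f" emits the 32-bit NOP
/// encoding directly into the instruction stream.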
7298bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7299 if (getLexer().is(AsmToken::EndOfStatement))
7300 return Error(Loc, "expected expression following '.inst' directive");
7301
7302 auto parseOp = [&]() -> bool {
7303 SMLoc L = getLoc();
7304 const MCExpr *Expr = nullptr;
7305 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7306 return true;
7307 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7308 if (check(!Value, L, "expected constant expression"))
7309 return true;
7310 getTargetStreamer().emitInst(Value->getValue());
7311 return false;
7312 };
7313
7314 return parseMany(parseOp);
7315}
7316
7317// parseDirectiveTLSDescCall:
7318// ::= .tlsdesccall symbol
7319bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7320 StringRef Name;
7321 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7322 parseToken(AsmToken::EndOfStatement))
7323 return true;
7324
7325 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7326 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7327 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7328
7329 MCInst Inst;
7330 Inst.setOpcode(AArch64::TLSDESCCALL);
7331 Inst.addOperand(MCOperand::createExpr(Expr));
7332
7333 getParser().getStreamer().emitInstruction(Inst, getSTI());
7334 return false;
7335}
7336
7337/// ::= .loh <lohName | lohId> label1, ..., labelN
7338/// The number of arguments depends on the loh identifier.
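/// For example (illustrative): ".loh AdrpAdrp Lbl1, Lbl2" supplies the two
/// labels required by that hint kind.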
7339bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7340 MCLOHType Kind;
7341 if (getTok().isNot(AsmToken::Identifier)) {
7342 if (getTok().isNot(AsmToken::Integer))
7343 return TokError("expected an identifier or a number in directive");
7344 // We successfully get a numeric value for the identifier.
7345 // Check if it is valid.
7346 int64_t Id = getTok().getIntVal();
7347 if (Id <= -1U && !isValidMCLOHType(Id))
7348 return TokError("invalid numeric identifier in directive");
7349 Kind = (MCLOHType)Id;
7350 } else {
7351 StringRef Name = getTok().getIdentifier();
7352 // We successfully parse an identifier.
7353 // Check if it is a recognized one.
7354 int Id = MCLOHNameToId(Name);
7355
7356 if (Id == -1)
7357 return TokError("invalid identifier in directive");
7358 Kind = (MCLOHType)Id;
7359 }
7360 // Consume the identifier.
7361 Lex();
7362 // Get the number of arguments of this LOH.
7363 int NbArgs = MCLOHIdToNbArgs(Kind);
7364
7365 assert(NbArgs != -1 && "Invalid number of arguments");
7366
7367 SmallVector<MCSymbol *, 3> Args;
7368 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7369 StringRef Name;
7370 if (getParser().parseIdentifier(Name))
7371 return TokError("expected identifier in directive");
7372 Args.push_back(getContext().getOrCreateSymbol(Name));
7373
7374 if (Idx + 1 == NbArgs)
7375 break;
7376 if (parseComma())
7377 return true;
7378 }
7379 if (parseEOL())
7380 return true;
7381
7382 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7383 return false;
7384}
7385
7386/// parseDirectiveLtorg
7387/// ::= .ltorg | .pool
7388bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7389 if (parseEOL())
7390 return true;
7391 getTargetStreamer().emitCurrentConstantPool();
7392 return false;
7393}
7394
7395/// parseDirectiveReq
7396/// ::= name .req registername
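/// For example (illustrative): "fpreg .req x29" lets later code write "fpreg"
/// for x29 until a matching ".unreq fpreg" removes the alias.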
7397bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7398 Lex(); // Eat the '.req' token.
7399 SMLoc SRegLoc = getLoc();
7400 RegKind RegisterKind = RegKind::Scalar;
7401 MCRegister RegNum;
7402 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7403
7404 if (!ParseRes.isSuccess()) {
7405 StringRef Kind;
7406 RegisterKind = RegKind::NeonVector;
7407 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7408
7409 if (ParseRes.isFailure())
7410 return true;
7411
7412 if (ParseRes.isSuccess() && !Kind.empty())
7413 return Error(SRegLoc, "vector register without type specifier expected");
7414 }
7415
7416 if (!ParseRes.isSuccess()) {
7417 StringRef Kind;
7418 RegisterKind = RegKind::SVEDataVector;
7419 ParseRes =
7420 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7421
7422 if (ParseRes.isFailure())
7423 return true;
7424
7425 if (ParseRes.isSuccess() && !Kind.empty())
7426 return Error(SRegLoc,
7427 "sve vector register without type specifier expected");
7428 }
7429
7430 if (!ParseRes.isSuccess()) {
7431 StringRef Kind;
7432 RegisterKind = RegKind::SVEPredicateVector;
7433 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7434
7435 if (ParseRes.isFailure())
7436 return true;
7437
7438 if (ParseRes.isSuccess() && !Kind.empty())
7439 return Error(SRegLoc,
7440 "sve predicate register without type specifier expected");
7441 }
7442
7443 if (!ParseRes.isSuccess())
7444 return Error(SRegLoc, "register name or alias expected");
7445
7446 // Shouldn't be anything else.
7447 if (parseEOL())
7448 return true;
7449
7450 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7451 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7452 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7453
7454 return false;
7455}
7456
7457 /// parseDirectiveUnreq
7458/// ::= .unreq registername
7459bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7460 if (getTok().isNot(AsmToken::Identifier))
7461 return TokError("unexpected input in .unreq directive.");
7462 RegisterReqs.erase(getTok().getIdentifier().lower());
7463 Lex(); // Eat the identifier.
7464 return parseToken(AsmToken::EndOfStatement);
7465}
7466
7467bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7468 if (parseEOL())
7469 return true;
7470 getStreamer().emitCFINegateRAState();
7471 return false;
7472}
7473
7474bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7475 if (parseEOL())
7476 return true;
7477 getStreamer().emitCFINegateRAStateWithPC();
7478 return false;
7479}
7480
7481/// parseDirectiveCFIBKeyFrame
7482 /// ::= .cfi_b_key_frame
7483bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7484 if (parseEOL())
7485 return true;
7486 getStreamer().emitCFIBKeyFrame();
7487 return false;
7488}
7489
7490/// parseDirectiveCFIMTETaggedFrame
7491/// ::= .cfi_mte_tagged_frame
7492bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7493 if (parseEOL())
7494 return true;
7495 getStreamer().emitCFIMTETaggedFrame();
7496 return false;
7497}
7498
7499/// parseDirectiveVariantPCS
7500/// ::= .variant_pcs symbolname
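/// For example (illustrative): ".variant_pcs my_sve_helper" marks that symbol
/// as using a variant procedure-call standard.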
7501bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7502 StringRef Name;
7503 if (getParser().parseIdentifier(Name))
7504 return TokError("expected symbol name");
7505 if (parseEOL())
7506 return true;
7507 getTargetStreamer().emitDirectiveVariantPCS(
7508 getContext().getOrCreateSymbol(Name));
7509 return false;
7510}
7511
7512/// parseDirectiveSEHAllocStack
7513/// ::= .seh_stackalloc
7514bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7515 int64_t Size;
7516 if (parseImmExpr(Size))
7517 return true;
7518 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7519 return false;
7520}
7521
7522/// parseDirectiveSEHPrologEnd
7523/// ::= .seh_endprologue
7524bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7525 getTargetStreamer().emitARM64WinCFIPrologEnd();
7526 return false;
7527}
7528
7529/// parseDirectiveSEHSaveR19R20X
7530/// ::= .seh_save_r19r20_x
7531bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7532 int64_t Offset;
7533 if (parseImmExpr(Offset))
7534 return true;
7535 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7536 return false;
7537}
7538
7539/// parseDirectiveSEHSaveFPLR
7540/// ::= .seh_save_fplr
7541bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7542 int64_t Offset;
7543 if (parseImmExpr(Offset))
7544 return true;
7545 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7546 return false;
7547}
7548
7549/// parseDirectiveSEHSaveFPLRX
7550/// ::= .seh_save_fplr_x
7551bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7552 int64_t Offset;
7553 if (parseImmExpr(Offset))
7554 return true;
7555 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7556 return false;
7557}
7558
7559/// parseDirectiveSEHSaveReg
7560/// ::= .seh_save_reg
7561bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7562 unsigned Reg;
7563 int64_t Offset;
7564 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7565 parseComma() || parseImmExpr(Offset))
7566 return true;
7567 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7568 return false;
7569}
7570
7571/// parseDirectiveSEHSaveRegX
7572/// ::= .seh_save_reg_x
7573bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7574 unsigned Reg;
7575 int64_t Offset;
7576 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7577 parseComma() || parseImmExpr(Offset))
7578 return true;
7579 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7580 return false;
7581}
7582
7583/// parseDirectiveSEHSaveRegP
7584/// ::= .seh_save_regp
7585bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7586 unsigned Reg;
7587 int64_t Offset;
7588 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7589 parseComma() || parseImmExpr(Offset))
7590 return true;
7591 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7592 return false;
7593}
7594
7595/// parseDirectiveSEHSaveRegPX
7596/// ::= .seh_save_regp_x
7597bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7598 unsigned Reg;
7599 int64_t Offset;
7600 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7601 parseComma() || parseImmExpr(Offset))
7602 return true;
7603 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7604 return false;
7605}
7606
7607/// parseDirectiveSEHSaveLRPair
7608/// ::= .seh_save_lrpair
7609bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7610 unsigned Reg;
7611 int64_t Offset;
7612 L = getLoc();
7613 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7614 parseComma() || parseImmExpr(Offset))
7615 return true;
7616 if (check(((Reg - 19) % 2 != 0), L,
7617 "expected register with even offset from x19"))
7618 return true;
7619 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7620 return false;
7621}
7622
7623/// parseDirectiveSEHSaveFReg
7624/// ::= .seh_save_freg
7625bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7626 unsigned Reg;
7627 int64_t Offset;
7628 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7629 parseComma() || parseImmExpr(Offset))
7630 return true;
7631 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7632 return false;
7633}
7634
7635/// parseDirectiveSEHSaveFRegX
7636/// ::= .seh_save_freg_x
7637bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7638 unsigned Reg;
7639 int64_t Offset;
7640 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7641 parseComma() || parseImmExpr(Offset))
7642 return true;
7643 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7644 return false;
7645}
7646
7647/// parseDirectiveSEHSaveFRegP
7648/// ::= .seh_save_fregp
7649bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7650 unsigned Reg;
7651 int64_t Offset;
7652 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7653 parseComma() || parseImmExpr(Offset))
7654 return true;
7655 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7656 return false;
7657}
7658
7659/// parseDirectiveSEHSaveFRegPX
7660/// ::= .seh_save_fregp_x
7661bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7662 unsigned Reg;
7663 int64_t Offset;
7664 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7665 parseComma() || parseImmExpr(Offset))
7666 return true;
7667 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7668 return false;
7669}
7670
7671/// parseDirectiveSEHSetFP
7672/// ::= .seh_set_fp
7673bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7674 getTargetStreamer().emitARM64WinCFISetFP();
7675 return false;
7676}
7677
7678/// parseDirectiveSEHAddFP
7679/// ::= .seh_add_fp
7680bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7681 int64_t Size;
7682 if (parseImmExpr(Size))
7683 return true;
7684 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7685 return false;
7686}
7687
7688/// parseDirectiveSEHNop
7689/// ::= .seh_nop
7690bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7691 getTargetStreamer().emitARM64WinCFINop();
7692 return false;
7693}
7694
7695/// parseDirectiveSEHSaveNext
7696/// ::= .seh_save_next
7697bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7698 getTargetStreamer().emitARM64WinCFISaveNext();
7699 return false;
7700}
7701
7702/// parseDirectiveSEHEpilogStart
7703/// ::= .seh_startepilogue
7704bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7705 getTargetStreamer().emitARM64WinCFIEpilogStart();
7706 return false;
7707}
7708
7709/// parseDirectiveSEHEpilogEnd
7710/// ::= .seh_endepilogue
7711bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7712 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7713 return false;
7714}
7715
7716/// parseDirectiveSEHTrapFrame
7717/// ::= .seh_trap_frame
7718bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7719 getTargetStreamer().emitARM64WinCFITrapFrame();
7720 return false;
7721}
7722
7723/// parseDirectiveSEHMachineFrame
7724/// ::= .seh_pushframe
7725bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7726 getTargetStreamer().emitARM64WinCFIMachineFrame();
7727 return false;
7728}
7729
7730/// parseDirectiveSEHContext
7731/// ::= .seh_context
7732bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7733 getTargetStreamer().emitARM64WinCFIContext();
7734 return false;
7735}
7736
7737/// parseDirectiveSEHECContext
7738/// ::= .seh_ec_context
7739bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7740 getTargetStreamer().emitARM64WinCFIECContext();
7741 return false;
7742}
7743
7744/// parseDirectiveSEHClearUnwoundToCall
7745/// ::= .seh_clear_unwound_to_call
7746bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7747 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7748 return false;
7749}
7750
7751/// parseDirectiveSEHPACSignLR
7752/// ::= .seh_pac_sign_lr
7753bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7754 getTargetStreamer().emitARM64WinCFIPACSignLR();
7755 return false;
7756}
7757
7758/// parseDirectiveSEHSaveAnyReg
7759/// ::= .seh_save_any_reg
7760/// ::= .seh_save_any_reg_p
7761/// ::= .seh_save_any_reg_x
7762/// ::= .seh_save_any_reg_px
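/// For example (illustrative): ".seh_save_any_reg_px q8, #32" records a
/// paired, pre-indexed save of q8/q9 at offset 32.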
7763bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7764 bool Writeback) {
7765 MCRegister Reg;
7766 SMLoc Start, End;
7767 int64_t Offset;
7768 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7769 parseComma() || parseImmExpr(Offset))
7770 return true;
7771
7772 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7773 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7774 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7775 return Error(L, "invalid save_any_reg offset");
7776 unsigned EncodedReg;
7777 if (Reg == AArch64::FP)
7778 EncodedReg = 29;
7779 else if (Reg == AArch64::LR)
7780 EncodedReg = 30;
7781 else
7782 EncodedReg = Reg - AArch64::X0;
7783 if (Paired) {
7784 if (Reg == AArch64::LR)
7785 return Error(Start, "lr cannot be paired with another register");
7786 if (Writeback)
7787 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7788 else
7789 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7790 } else {
7791 if (Writeback)
7792 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7793 else
7794 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7795 }
7796 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7797 unsigned EncodedReg = Reg - AArch64::D0;
7798 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7799 return Error(L, "invalid save_any_reg offset");
7800 if (Paired) {
7801 if (Reg == AArch64::D31)
7802 return Error(Start, "d31 cannot be paired with another register");
7803 if (Writeback)
7804 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7805 else
7806 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7807 } else {
7808 if (Writeback)
7809 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7810 else
7811 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7812 }
7813 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7814 unsigned EncodedReg = Reg - AArch64::Q0;
7815 if (Offset < 0 || Offset % 16)
7816 return Error(L, "invalid save_any_reg offset");
7817 if (Paired) {
7818 if (Reg == AArch64::Q31)
7819 return Error(Start, "q31 cannot be paired with another register");
7820 if (Writeback)
7821 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7822 else
7823 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7824 } else {
7825 if (Writeback)
7826 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7827 else
7828 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7829 }
7830 } else {
7831 return Error(Start, "save_any_reg register must be x, q or d register");
7832 }
7833 return false;
7834}
7835
7836bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
7837 // Expecting 3 AsmToken::Identifier after '.aeabi_subsection': (1) a subsection
7838 // name, (2) an optionality parameter and (3) a type parameter, separated by
7839 // 2 commas.
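// For example (illustrative), the two subsections recognized below:
//   .aeabi_subsection aeabi_pauthabi, required, uleb128
//   .aeabi_subsection aeabi_feature_and_bits, optional, uleb128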
7840 MCAsmParser &Parser = getParser();
7841
7842 // Consume the name (subsection name)
7843 StringRef SubsectionName;
7844 AArch64BuildAttributes::VendorID SubsectionNameID;
7845 if (Parser.getTok().is(AsmToken::Identifier)) {
7846 SubsectionName = Parser.getTok().getIdentifier();
7847 SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
7848 } else {
7849 Error(Parser.getTok().getLoc(), "subsection name not found");
7850 return true;
7851 }
7852 Parser.Lex();
7853 // consume a comma
7854 // parseComma() return *false* on success, and call Lex(), no need to call
7855 // Lex() again.
7856 if (Parser.parseComma()) {
7857 return true;
7858 }
7859
7860 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
7861 getTargetStreamer().getAtributesSubsectionByName(SubsectionName);
7862
7863 // Consume the first parameter (optionality parameter)
7865 // options: optional/required
7866 if (Parser.getTok().is(AsmToken::Identifier)) {
7867 StringRef Optionality = Parser.getTok().getIdentifier();
7868 IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
7870 Error(Parser.getTok().getLoc(),
7872 Optionality);
7873 return true;
7874 }
7875 if (SubsectionExists) {
7876 if (IsOptional != SubsectionExists->IsOptional) {
7877 Error(Parser.getTok().getLoc(),
7878 "optionality mismatch! subsection '" + SubsectionName +
7879 "' already exists with optionality defined as '" +
7880 AArch64BuildAttributes::getOptionalStr(
7881 SubsectionExists->IsOptional) +
7882 "' and not '" +
7883 AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
7884 return true;
7885 }
7886 }
7887 } else {
7888 Error(Parser.getTok().getLoc(),
7889 "optionality parameter not found, expected required|optional");
7890 return true;
7891 }
7892 // Check for possible IsOptional unaccepted values for known subsections
7893 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
7894 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
7895 Error(Parser.getTok().getLoc(),
7896 "aeabi_feature_and_bits must be marked as optional");
7897 return true;
7898 }
7899 }
7900 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
7901 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
7902 Error(Parser.getTok().getLoc(),
7903 "aeabi_pauthabi must be marked as required");
7904 return true;
7905 }
7906 }
7907 Parser.Lex();
7908 // consume a comma
7909 if (Parser.parseComma()) {
7910 return true;
7911 }
7912
7913 // Consume the second parameter (type parameter)
7915 if (Parser.getTok().is(AsmToken::Identifier)) {
7916 StringRef Name = Parser.getTok().getIdentifier();
7919 Error(Parser.getTok().getLoc(),
7921 Name);
7922 return true;
7923 }
7924 if (SubsectionExists) {
7925 if (Type != SubsectionExists->ParameterType) {
7926 Error(Parser.getTok().getLoc(),
7927 "type mismatch! subsection '" + SubsectionName +
7928 "' already exists with type defined as '" +
7929 AArch64BuildAttributes::getTypeStr(
7930 SubsectionExists->ParameterType) +
7931 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
7932 "'");
7933 return true;
7934 }
7935 }
7936 } else {
7937 Error(Parser.getTok().getLoc(),
7938 "type parameter not found, expected uleb128|ntbs");
7939 return true;
7940 }
7941 // Check for possible unaccepted 'type' values for known subsections
7942 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
7943 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
7944 if (AArch64BuildAttributes::ULEB128 != Type) {
7945 Error(Parser.getTok().getLoc(),
7946 SubsectionName + " must be marked as ULEB128");
7947 return true;
7948 }
7949 }
7950 Parser.Lex();
7951 // Parsing finished, check for trailing tokens.
7952 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7953 Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
7954 "attributes subsection header directive");
7955 return true;
7956 }
7957
7958 getTargetStreamer().emitAtributesSubsection(SubsectionName, IsOptional, Type);
7959
7960 return false;
7961}
7962
7963bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
7964 // Expecting 2 tokens after '.aeabi_attribute': (1) a tag and (2) a value
7965 // of type [uleb128|ntbs], separated by a comma, e.g.:
7966 // .aeabi_attribute Tag_Feature_BTI, <value>
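// For example (illustrative), with an active ULEB128-typed subsection:
//   .aeabi_attribute Tag_Feature_BTI, 1
// A raw integer tag is also accepted in place of a known tag name.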
7967 MCAsmParser &Parser = getParser();
7968
7969 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
7970 getTargetStreamer().getActiveAtributesSubsection();
7971 if (nullptr == ActiveSubsection) {
7972 Error(Parser.getTok().getLoc(),
7973 "no active subsection, build attribute can not be added");
7974 return true;
7975 }
7976 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
7977 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
7978
7979 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
7980 if (AArch64BuildAttributes::getVendorName(
7981 AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
7982 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
7983 if (AArch64BuildAttributes::getVendorName(
7984 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
7985 ActiveSubsectionName)
7986 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
7987
7988 StringRef TagStr = "";
7989 unsigned Tag;
7990 if (Parser.getTok().is(AsmToken::Identifier)) {
7991 TagStr = Parser.getTok().getIdentifier();
7992 switch (ActiveSubsectionID) {
7993 default:
7994 assert(0 && "Subsection name error");
7995 break;
7997 // Private subsection, accept any tag.
7998 break;
8002 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8003 TagStr + "' for subsection '" +
8004 ActiveSubsectionName + "'");
8005 return true;
8006 }
8007 break;
8011 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8012 TagStr + "' for subsection '" +
8013 ActiveSubsectionName + "'");
8014 return true;
8015 }
8016 break;
8017 }
8018 } else if (Parser.getTok().is(AsmToken::Integer)) {
8019 Tag = getTok().getIntVal();
8020 } else {
8021 Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
8022 return true;
8023 }
8024 Parser.Lex();
8025 // consume a comma
8026 // parseComma() return *false* on success, and call Lex(), no need to call
8027 // Lex() again.
8028 if (Parser.parseComma()) {
8029 return true;
8030 }
8031
8032 // Consume the second parameter (attribute value)
8033 unsigned ValueInt = unsigned(-1);
8034 std::string ValueStr = "";
8035 if (Parser.getTok().is(AsmToken::Integer)) {
8036 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8037 Error(
8038 Parser.getTok().getLoc(),
8039 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8040 return true;
8041 }
8042 ValueInt = getTok().getIntVal();
8043 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8044 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8045 Error(
8046 Parser.getTok().getLoc(),
8047 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8048 return true;
8049 }
8050 ValueStr = Parser.getTok().getIdentifier();
8051 } else if (Parser.getTok().is(AsmToken::String)) {
8052 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8053 Error(
8054 Parser.getTok().getLoc(),
8055 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8056 return true;
8057 }
8058 ValueStr = Parser.getTok().getString();
8059 } else {
8060 Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
8061 return true;
8062 }
8063 // Check for possible unaccepted values for known tags (AEABI_PAUTHABI,
8064 // AEABI_FEATURE_AND_BITS)
8065 if (!(ActiveSubsectionID == AArch64BuildAttributes::VENDOR_UNKNOWN) &&
8066 TagStr != "") { // TagStr was a recognized string
8067 if (0 != ValueInt && 1 != ValueInt) {
8068 Error(Parser.getTok().getLoc(),
8069 "unknown AArch64 build attributes Value for Tag '" + TagStr +
8070 "' options are 0|1");
8071 return true;
8072 }
8073 }
8074 Parser.Lex();
8075 // Parsing finished, check for trailing tokens.
8076 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8077 Error(Parser.getTok().getLoc(),
8078 "unexpected token for AArch64 build attributes tag and value "
8079 "attribute directive");
8080 return true;
8081 }
8082
8083 if (unsigned(-1) != ValueInt) {
8084 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "",
8085 false);
8086 }
8087
8088 if ("" != ValueStr) {
8089 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
8090 ValueStr, false);
8091 }
8092 return false;
8093}
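// Illustrative usage (editorial sketch): building on the comment at the top
// of parseDirectiveAeabiAArch64Attr, a ULEB128-typed subsection would carry
// tag/value pairs such as:
//   .aeabi_subsection aeabi_feature_and_bits, optional, uleb128
//   .aeabi_attribute Tag_Feature_BTI, 1
//   .aeabi_attribute Tag_Feature_PAC, 1
// For the two known subsections only the values 0 and 1 are accepted, as the
// "options are 0|1" check above enforces, and tag names not recognized by
// getPauthABITagsID/getFeatureAndBitsTagsID are rejected. Tag_Feature_BTI
// comes from the comment above; Tag_Feature_PAC is quoted as an assumption.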
8094
8095bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8096 // Try @AUTH expressions: they're more complex than the usual symbol variants.
8097 if (!parseAuthExpr(Res, EndLoc))
8098 return false;
8099 return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
8100}
8101
8102/// parseAuthExpr
8103/// ::= _sym@AUTH(ib,123[,addr])
8104/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8105/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8106bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8107 MCAsmParser &Parser = getParser();
8108 MCContext &Ctx = getContext();
8109
8110 AsmToken Tok = Parser.getTok();
8111
8112 // Look for '_sym@AUTH' ...
8113 if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
8114 StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
8115 if (SymName.contains('@'))
8116 return TokError(
8117 "combination of @AUTH with other modifiers not supported");
8118 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
8119
8120 Parser.Lex(); // Eat the identifier.
8121 } else {
8122 // ... or look for a more complex symbol reference, such as ...
8123 SmallVector<AsmToken, 6> Tokens;
8124
8125 // ... '"_long sym"@AUTH' ...
8126 if (Tok.is(AsmToken::String))
8127 Tokens.resize(2);
8128 // ... or '(_sym + 5)@AUTH'.
8129 else if (Tok.is(AsmToken::LParen))
8130 Tokens.resize(6);
8131 else
8132 return true;
8133
8134 if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
8135 return true;
8136
8137 // In either case, the expression ends with '@' 'AUTH'.
8138 if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
8139 Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
8140 Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
8141 return true;
8142
8143 if (Tok.is(AsmToken::String)) {
8144 StringRef SymName;
8145 if (Parser.parseIdentifier(SymName))
8146 return true;
8147 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
8148 } else {
8149 if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
8150 return true;
8151 }
8152
8153 Parser.Lex(); // '@'
8154 Parser.Lex(); // 'AUTH'
8155 }
8156
8157 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8158 if (parseToken(AsmToken::LParen, "expected '('"))
8159 return true;
8160
8161 if (Parser.getTok().isNot(AsmToken::Identifier))
8162 return TokError("expected key name");
8163
8164 StringRef KeyStr = Parser.getTok().getIdentifier();
8165 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8166 if (!KeyIDOrNone)
8167 return TokError("invalid key '" + KeyStr + "'");
8168 Parser.Lex();
8169
8170 if (parseToken(AsmToken::Comma, "expected ','"))
8171 return true;
8172
8173 if (Parser.getTok().isNot(AsmToken::Integer))
8174 return TokError("expected integer discriminator");
8175 int64_t Discriminator = Parser.getTok().getIntVal();
8176
8177 if (!isUInt<16>(Discriminator))
8178 return TokError("integer discriminator " + Twine(Discriminator) +
8179 " out of range [0, 0xFFFF]");
8180 Parser.Lex();
8181
8182 bool UseAddressDiversity = false;
8183 if (Parser.getTok().is(AsmToken::Comma)) {
8184 Parser.Lex();
8185 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8186 Parser.getTok().getIdentifier() != "addr")
8187 return TokError("expected 'addr'");
8188 UseAddressDiversity = true;
8189 Parser.Lex();
8190 }
8191
8192 EndLoc = Parser.getTok().getEndLoc();
8193 if (parseToken(AsmToken::RParen, "expected ')'"))
8194 return true;
8195
8196 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8197 UseAddressDiversity, Ctx);
8198 return false;
8199}
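// Illustrative usage (editorial sketch): per the grammar comment above,
// signed-pointer data could be emitted as, for example:
//   .quad _sym@AUTH(ia, 42)
//   .quad (_sym + 16)@AUTH(db, 1234, addr)
// The key must be one of the two-letter names accepted by
// AArch64StringToPACKeyID (ia/ib/da/db), the discriminator must fit in
// 16 bits, and the optional trailing "addr" enables address diversity.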
8200
8201bool
8202AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8203 AArch64MCExpr::VariantKind &ELFRefKind,
8204 MCSymbolRefExpr::VariantKind &DarwinRefKind,
8205 int64_t &Addend) {
8206 ELFRefKind = AArch64MCExpr::VK_INVALID;
8207 DarwinRefKind = MCSymbolRefExpr::VK_None;
8208 Addend = 0;
8209
8210 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
8211 ELFRefKind = AE->getKind();
8212 Expr = AE->getSubExpr();
8213 }
8214
8215 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
8216 if (SE) {
8217 // It's a simple symbol reference with no addend.
8218 DarwinRefKind = SE->getKind();
8219 return true;
8220 }
8221
8222 // Check that it looks like a symbol + an addend
8223 MCValue Res;
8224 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
8225 if (!Relocatable || Res.getSymB())
8226 return false;
8227
8228 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
8229 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8230 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
8231 return false;
8232
8233 if (Res.getSymA())
8234 DarwinRefKind = Res.getSymA()->getKind();
8235 Addend = Res.getConstant();
8236
8237 // It's some symbol reference + a constant addend, but really
8238 // shouldn't use both Darwin and ELF syntax.
8239 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
8240 DarwinRefKind == MCSymbolRefExpr::VK_None;
8241}
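// Editorial note: as an illustration of the classification above, an operand
// such as ":lo12:sym + 8" is expected to come back with the :lo12: ELF
// reference kind and Addend == 8, while a Darwin-style "sym@PAGEOFF" sets
// DarwinRefKind instead; an expression mixing both syntaxes is rejected by
// the final check.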
8242
8243 /// Force static initialization.
8244 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
8245 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8246 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8247 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8248 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8249 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8250 }
8251
8252#define GET_REGISTER_MATCHER
8253#define GET_SUBTARGET_FEATURE_NAME
8254#define GET_MATCHER_IMPLEMENTATION
8255#define GET_MNEMONIC_SPELL_CHECKER
8256#include "AArch64GenAsmMatcher.inc"
8257
8258// Define this matcher function after the auto-generated include so we
8259// have the match class enum definitions.
8260unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8261 unsigned Kind) {
8262 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8263
8264 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8265 if (!Op.isImm())
8266 return Match_InvalidOperand;
8267 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8268 if (!CE)
8269 return Match_InvalidOperand;
8270 if (CE->getValue() == ExpectedVal)
8271 return Match_Success;
8272 return Match_InvalidOperand;
8273 };
8274
8275 switch (Kind) {
8276 default:
8277 return Match_InvalidOperand;
8278 case MCK_MPR:
8279 // If the Kind is a token for the MPR register class which has the "za"
8280 // register (SME accumulator array), check if the asm is a literal "za"
8281 // token. This is for the "smstart za" alias that defines the register
8282 // as a literal token.
8283 if (Op.isTokenEqual("za"))
8284 return Match_Success;
8285 return Match_InvalidOperand;
8286
8287 // If the kind is a token for a literal immediate, check if our asm operand
8288 // matches. This is for InstAliases which have a fixed-value immediate in
8289 // the asm string, such as hints which are parsed into a specific
8290 // instruction definition.
8291#define MATCH_HASH(N) \
8292 case MCK__HASH_##N: \
8293 return MatchesOpImmediate(N);
8294 MATCH_HASH(0)
8295 MATCH_HASH(1)
8296 MATCH_HASH(2)
8297 MATCH_HASH(3)
8298 MATCH_HASH(4)
8299 MATCH_HASH(6)
8300 MATCH_HASH(7)
8301 MATCH_HASH(8)
8302 MATCH_HASH(10)
8303 MATCH_HASH(12)
8304 MATCH_HASH(14)
8305 MATCH_HASH(16)
8306 MATCH_HASH(24)
8307 MATCH_HASH(25)
8308 MATCH_HASH(26)
8309 MATCH_HASH(27)
8310 MATCH_HASH(28)
8311 MATCH_HASH(29)
8312 MATCH_HASH(30)
8313 MATCH_HASH(31)
8314 MATCH_HASH(32)
8315 MATCH_HASH(40)
8316 MATCH_HASH(48)
8317 MATCH_HASH(64)
8318#undef MATCH_HASH
8319#define MATCH_HASH_MINUS(N) \
8320 case MCK__HASH__MINUS_##N: \
8321 return MatchesOpImmediate(-N);
8322 MATCH_HASH_MINUS(4)
8323 MATCH_HASH_MINUS(8)
8324 MATCH_HASH_MINUS(16)
8325 #undef MATCH_HASH_MINUS
8326 }
8327}
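// Editorial note: this hook covers aliases whose asm string hard-codes an
// operand, e.g. "smstart za" (the literal "za" token matched by MCK_MPR
// above). The MATCH_HASH cases accept a parsed constant-immediate operand
// only when its value equals the literal in the alias string, so an alias
// spelled with "#0" matches solely an operand that folds to 0.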
8328
8329ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8330
8331 SMLoc S = getLoc();
8332
8333 if (getTok().isNot(AsmToken::Identifier))
8334 return Error(S, "expected register");
8335
8336 MCRegister FirstReg;
8337 ParseStatus Res = tryParseScalarRegister(FirstReg);
8338 if (!Res.isSuccess())
8339 return Error(S, "expected first even register of a consecutive same-size "
8340 "even/odd register pair");
8341
8342 const MCRegisterClass &WRegClass =
8343 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8344 const MCRegisterClass &XRegClass =
8345 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8346
8347 bool isXReg = XRegClass.contains(FirstReg),
8348 isWReg = WRegClass.contains(FirstReg);
8349 if (!isXReg && !isWReg)
8350 return Error(S, "expected first even register of a consecutive same-size "
8351 "even/odd register pair");
8352
8353 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8354 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8355
8356 if (FirstEncoding & 0x1)
8357 return Error(S, "expected first even register of a consecutive same-size "
8358 "even/odd register pair");
8359
8360 if (getTok().isNot(AsmToken::Comma))
8361 return Error(getLoc(), "expected comma");
8362 // Eat the comma
8363 Lex();
8364
8365 SMLoc E = getLoc();
8366 MCRegister SecondReg;
8367 Res = tryParseScalarRegister(SecondReg);
8368 if (!Res.isSuccess())
8369 return Error(E, "expected second odd register of a consecutive same-size "
8370 "even/odd register pair");
8371
8372 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8373 (isXReg && !XRegClass.contains(SecondReg)) ||
8374 (isWReg && !WRegClass.contains(SecondReg)))
8375 return Error(E, "expected second odd register of a consecutive same-size "
8376 "even/odd register pair");
8377
8378 MCRegister Pair;
8379 if (isXReg) {
8380 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8381 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8382 } else {
8383 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8384 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8385 }
8386
8387 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8388 getLoc(), getContext()));
8389
8390 return ParseStatus::Success;
8391}
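// Illustrative usage (editorial sketch): even/odd sequential pairs are what,
// for example, the CASP family of instructions takes:
//   casp  x0, x1, x2, x3, [x4]
//   caspa w4, w5, w6, w7, [x2]
// Something like "casp x1, x2, ..." would be rejected by the even-register
// check above.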
8392
8393template <bool ParseShiftExtend, bool ParseSuffix>
8394ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8395 const SMLoc S = getLoc();
8396 // Check for a SVE vector register specifier first.
8397 MCRegister RegNum;
8398 StringRef Kind;
8399
8400 ParseStatus Res =
8401 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8402
8403 if (!Res.isSuccess())
8404 return Res;
8405
8406 if (ParseSuffix && Kind.empty())
8407 return ParseStatus::NoMatch;
8408
8409 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8410 if (!KindRes)
8411 return ParseStatus::NoMatch;
8412
8413 unsigned ElementWidth = KindRes->second;
8414
8415 // No shift/extend is the default.
8416 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8417 Operands.push_back(AArch64Operand::CreateVectorReg(
8418 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8419
8420 ParseStatus Res = tryParseVectorIndex(Operands);
8421 if (Res.isFailure())
8422 return ParseStatus::Failure;
8423 return ParseStatus::Success;
8424 }
8425
8426 // Eat the comma
8427 Lex();
8428
8429 // Match the shift
8430 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8431 Res = tryParseOptionalShiftExtend(ExtOpnd);
8432 if (!Res.isSuccess())
8433 return Res;
8434
8435 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8436 Operands.push_back(AArch64Operand::CreateVectorReg(
8437 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8438 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8439 Ext->hasShiftExtendAmount()));
8440
8441 return ParseStatus::Success;
8442}
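// Illustrative usage (editorial sketch): the operand parsed above appears
// both as a plain SVE data vector with an element-size suffix, e.g. "z1.s" in
//   add z0.s, z1.s, z2.s
// and, when ParseShiftExtend is enabled, as a vector-plus-extend gather
// address component such as "z1.d, lsl #3" in
//   ld1d { z0.d }, p0/z, [x0, z1.d, lsl #3]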
8443
8444ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8445 MCAsmParser &Parser = getParser();
8446
8447 SMLoc SS = getLoc();
8448 const AsmToken &TokE = getTok();
8449 bool IsHash = TokE.is(AsmToken::Hash);
8450
8451 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8452 return ParseStatus::NoMatch;
8453
8454 int64_t Pattern;
8455 if (IsHash) {
8456 Lex(); // Eat hash
8457
8458 // Parse the immediate operand.
8459 const MCExpr *ImmVal;
8460 SS = getLoc();
8461 if (Parser.parseExpression(ImmVal))
8462 return ParseStatus::Failure;
8463
8464 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8465 if (!MCE)
8466 return TokError("invalid operand for instruction");
8467
8468 Pattern = MCE->getValue();
8469 } else {
8470 // Parse the pattern
8471 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8472 if (!Pat)
8473 return ParseStatus::NoMatch;
8474
8475 Lex();
8476 Pattern = Pat->Encoding;
8477 assert(Pattern >= 0 && Pattern < 32);
8478 }
8479
8480 Operands.push_back(
8481 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8482 SS, getLoc(), getContext()));
8483
8484 return ParseStatus::Success;
8485}
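// Illustrative usage (editorial sketch): the pattern operand is either a
// named predicate pattern or a raw "#imm" encoding, for example:
//   ptrue p0.s, vl128
//   cntb  x0, all, mul #4
// Unnamed encodings can still be written as an immediate, which is the IsHash
// path above.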
8486
8487 ParseStatus
8488 AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8489 int64_t Pattern;
8490 SMLoc SS = getLoc();
8491 const AsmToken &TokE = getTok();
8492 // Parse the pattern
8493 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8494 TokE.getString());
8495 if (!Pat)
8496 return ParseStatus::NoMatch;
8497
8498 Lex();
8499 Pattern = Pat->Encoding;
8500 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8501
8502 Operands.push_back(
8503 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8504 SS, getLoc(), getContext()));
8505
8506 return ParseStatus::Success;
8507}
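// Editorial note (assumption): the names accepted by
// lookupSVEVECLENSPECIFIERByName are expected to be the two vector-length
// specifiers with encodings 0 and 1 (vlx2 and vlx4), as used by SME2
// predicate-as-counter "while" instructions, e.g.
//   whilelo pn8.b, x0, x1, vlx2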
8508
8509ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8510 SMLoc SS = getLoc();
8511
8512 MCRegister XReg;
8513 if (!tryParseScalarRegister(XReg).isSuccess())
8514 return ParseStatus::NoMatch;
8515
8516 MCContext &ctx = getContext();
8517 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8518 MCRegister X8Reg = RI->getMatchingSuperReg(
8519 XReg, AArch64::x8sub_0,
8520 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8521 if (!X8Reg)
8522 return Error(SS,
8523 "expected an even-numbered x-register in the range [x0,x22]");
8524
8525 Operands.push_back(
8526 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8527 return ParseStatus::Success;
8528}
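// Illustrative usage (editorial sketch): the x8-tuple register class is what,
// for example, the LS64 single-copy load/store instructions take; the named
// register is the first of eight consecutive X registers and must be even and
// no higher than x22:
//   ld64b x0, [x13]
//   st64b x8, [x2]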
8529
8530ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8531 SMLoc S = getLoc();
8532
8533 if (getTok().isNot(AsmToken::Integer))
8534 return ParseStatus::NoMatch;
8535
8536 if (getLexer().peekTok().isNot(AsmToken::Colon))
8537 return ParseStatus::NoMatch;
8538
8539 const MCExpr *ImmF;
8540 if (getParser().parseExpression(ImmF))
8541 return ParseStatus::NoMatch;
8542
8543 if (getTok().isNot(AsmToken::Colon))
8544 return ParseStatus::NoMatch;
8545
8546 Lex(); // Eat ':'
8547 if (getTok().isNot(AsmToken::Integer))
8548 return ParseStatus::NoMatch;
8549
8550 SMLoc E = getTok().getLoc();
8551 const MCExpr *ImmL;
8552 if (getParser().parseExpression(ImmL))
8553 return ParseStatus::NoMatch;
8554
8555 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8556 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8557
8558 Operands.push_back(
8559 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8560 return ParseStatus::Success;
8561}
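// Illustrative usage (editorial sketch): the range operand is written as two
// integers separated by a colon, e.g. the "0:1" offset range in an SME2 ZA
// slice such as
//   mov za.d[w8, 0:1], { z0.d, z1.d }
// (the surrounding instruction is quoted as an assumption; the parser itself
// only requires the "first:last" form).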
8562
8563template <int Adj>
8564ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8565 SMLoc S = getLoc();
8566
8567 parseOptionalToken(AsmToken::Hash);
8568 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8569
8570 if (getTok().isNot(AsmToken::Integer))
8571 return ParseStatus::NoMatch;
8572
8573 const MCExpr *Ex;
8574 if (getParser().parseExpression(Ex))
8575 return ParseStatus::NoMatch;
8576
8577 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8578 if (IsNegative)
8579 Imm = -Imm;
8580
8581 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8582 // return a value that is certain to trigger an error message about an invalid
8583 // immediate range instead of a non-descriptive invalid operand error.
8584 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8585 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8586 Imm = -2;
8587 else
8588 Imm += Adj;
8589
8590 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8591 Operands.push_back(AArch64Operand::CreateImm(
8592 MCConstantExpr::create(Imm, getContext()), S, E, getContext()));
8593
8594 return ParseStatus::Success;
8595}
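// Editorial note: given the static_assert above, Adj is either +1 or -1, so
// the written immediate accepted here is [1, 64] for Adj == -1 and [-1, 62]
// for Adj == +1; anything outside that range is forced to -2 so the matcher
// reports an immediate-range diagnostic rather than a generic invalid-operand
// error.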