LLVM 20.0.0git
MachineVerifier.cpp
Go to the documentation of this file.
1//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Pass to verify generated machine code. The following is checked:
10//
11// Operand counts: All explicit operands must be present.
12//
13// Register classes: All physical and virtual register operands must be
14// compatible with the register class required by the instruction descriptor.
15//
16// Register live intervals: Registers must be defined only once, and must be
17// defined before use.
18//
19// The machine code verifier is enabled with the command-line option
20// -verify-machineinstrs.
21//===----------------------------------------------------------------------===//
22
24#include "llvm/ADT/BitVector.h"
25#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/DenseSet.h"
29#include "llvm/ADT/STLExtras.h"
33#include "llvm/ADT/StringRef.h"
34#include "llvm/ADT/Twine.h"
64#include "llvm/IR/BasicBlock.h"
65#include "llvm/IR/Constants.h"
67#include "llvm/IR/Function.h"
68#include "llvm/IR/InlineAsm.h"
71#include "llvm/MC/LaneBitmask.h"
72#include "llvm/MC/MCAsmInfo.h"
73#include "llvm/MC/MCDwarf.h"
74#include "llvm/MC/MCInstrDesc.h"
77#include "llvm/Pass.h"
82#include "llvm/Support/ModRef.h"
83#include "llvm/Support/Mutex.h"
86#include <algorithm>
87#include <cassert>
88#include <cstddef>
89#include <cstdint>
90#include <iterator>
91#include <string>
92#include <utility>
93
94using namespace llvm;
95
96namespace {
97
/// Used by the ReportedErrors class to guarantee only one error is reported
/// at one time.
100static ManagedStatic<sys::SmartMutex<true>> ReportedErrorsLock;
101
/// Implements the machine-code checks described in the file header for a
/// single MachineFunction. One instance is created per verification request;
/// verify() is the entry point, and the visit*/check*/verify* members are
/// its helpers.
struct MachineVerifier {
  /// New-pass-manager entry: cached analyses are fetched from \p MFAM.
  MachineVerifier(MachineFunctionAnalysisManager &MFAM, const char *b,
                  raw_ostream *OS, bool AbortOnError = true)
      : MFAM(&MFAM), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  /// Legacy-pass-manager entry: analyses are queried through \p pass.
  MachineVerifier(Pass *pass, const char *b, raw_ostream *OS,
                  bool AbortOnError = true)
      : PASS(pass), OS(OS ? *OS : nulls()), Banner(b),
        ReportedErrs(AbortOnError) {}

  /// Standalone entry with explicitly supplied analyses; any of the analysis
  /// pointers may be null.
  MachineVerifier(const char *b, LiveVariables *LiveVars,
                  LiveIntervals *LiveInts, LiveStacks *LiveStks,
                  SlotIndexes *Indexes, raw_ostream *OS,
                  bool AbortOnError = true)
      : OS(OS ? *OS : nulls()), Banner(b), LiveVars(LiveVars),
        LiveInts(LiveInts), LiveStks(LiveStks), Indexes(Indexes),
        ReportedErrs(AbortOnError) {}

  /// \returns true if no problems were found.
  bool verify(const MachineFunction &MF);

  MachineFunctionAnalysisManager *MFAM = nullptr;
  Pass *const PASS = nullptr;
  // NOTE(review): this excerpt elides the `raw_ostream &OS;` member that the
  // constructor initializer lists above clearly reference.
  const char *Banner;
  const MachineFunction *MF = nullptr;
  const TargetMachine *TM = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const RegisterBankInfo *RBI = nullptr;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected = false;
  bool isFunctionSelected = false;
  bool isFunctionTracksDebugUserValues = false;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  // NOTE(review): the `BlockSet` and `RegMap` aliases used below are elided
  // in this excerpt.

  const MachineInstr *FirstNonPHI = nullptr;
  const MachineInstr *FirstTerminator = nullptr;
  BlockSet FunctionBlocks;

  // Reserved-register mask for the current function (frozen in verify()).
  BitVector regsReserved;
  // Per-block scratch state updated while walking the instructions.
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  // Index of the most recently visited instruction, used to check ordering.
  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  /// Liveness/CFG facts collected per basic block by the dataflow passes
  /// (calcRegsPassed / calcRegsRequired).
  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that only are used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };

  // Extra register info per MBB.
  // NOTE(review): the `MBBInfoMap` member declaration is elided in this
  // excerpt; it is used throughout (e.g. markReachable, verify cleanup).

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars = nullptr;
  LiveIntervals *LiveInts = nullptr;
  LiveStacks *LiveStks = nullptr;
  SlotIndexes *Indexes = nullptr;

  /// A class to track the number of reported errors and to guarantee that
  /// only one error is reported at one time.
  class ReportedErrors {
    unsigned NumReported = 0;
    bool AbortOnError;

  public:
    /// \param AbortOnError -- If set, abort after printing the first error.
    ReportedErrors(bool AbortOnError) : AbortOnError(AbortOnError) {}

    ~ReportedErrors() {
      if (!hasError())
        return;
      if (AbortOnError)
        report_fatal_error("Found " + Twine(NumReported) +
                           " machine code errors.");
      // Since we haven't aborted, release the lock to allow other threads to
      // report errors.
      ReportedErrorsLock->unlock();
    }

    /// Increment the number of reported errors.
    /// \returns true if this is the first reported error.
    bool increment() {
      // If this is the first error this thread has encountered, grab the lock
      // to prevent other threads from reporting errors at the same time.
      // Otherwise we assume we already have the lock.
      if (!hasError())
        ReportedErrorsLock->lock();
      ++NumReported;
      return NumReported == 1;
    }

    /// \returns true if an error was reported.
    bool hasError() { return NumReported; }
  };
  ReportedErrors ReportedErrs;

  // This is calculated only when trying to verify convergence control tokens.
  // Similar to the LLVM IR verifier, we calculate this locally instead of
  // relying on the pass manager.
  // NOTE(review): the member declaration this comment describes is elided in
  // this excerpt.

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

  bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
  bool verifyGIntrinsicConvergence(const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);

  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  // Error reporting: each overload prints successively narrower context
  // (function, block, instruction, operand).
  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  // Extra context lines appended after a report() call.
  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, VirtRegOrUnit VRegOrUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(VirtRegOrUnit VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          VirtRegOrUnit VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          VirtRegOrUnit VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval &);
  // NOTE(review): the next two declarations are truncated in this excerpt;
  // their trailing parameter lines are elided.
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, VirtRegOrUnit,
  void verifyLiveRangeSegment(const LiveRange &,
  void verifyLiveRange(const LiveRange &, VirtRegOrUnit,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();
  /// Check that the stack protector is the top-most object in the stack.
  void verifyStackProtector();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};
362
/// Legacy pass-manager wrapper that runs the MachineVerifier over each
/// machine function. Never modifies the function (always returns false).
struct MachineVerifierLegacyPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  // Text printed before the function dump on the first reported error.
  const std::string Banner;

  MachineVerifierLegacyPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    // NOTE(review): the pass-initialization call in this constructor body is
    // elided in this excerpt.
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // NOTE(review): the addUsedIfAvailable<...> lines are elided in this
    // excerpt; the verifier only consumes analyses, so everything is
    // preserved.
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    MachineVerifier(this, Banner.c_str(), &errs()).verify(MF);
    return false;
  }
};
394
395} // end anonymous namespace
396
  // NOTE(review): the enclosing function signature
  // (MachineVerifierPass::run(MachineFunction &, ...)) and the checked
  // property name are elided in this excerpt.
  // Skip functions that have known verification problems.
  // FIXME: Remove this mechanism when all problematic passes have been
  // fixed.
  if (MF.getProperties().hasProperty(
    return PreservedAnalyses::all();
  MachineVerifier(MFAM, Banner.c_str(), &errs()).verify(MF);
  return PreservedAnalyses::all();
}
409
// Pass identity; the address of ID is what the pass manager keys on.
char MachineVerifierLegacyPass::ID = 0;

INITIALIZE_PASS(MachineVerifierLegacyPass, "machineverifier",
                "Verify generated machine code", false, false)
414
  // NOTE(review): the signature of createMachineVerifierPass(const
  // std::string &Banner) is elided in this excerpt.
  return new MachineVerifierLegacyPass(Banner);
}
418
419void llvm::verifyMachineFunction(const std::string &Banner,
420 const MachineFunction &MF) {
421 // TODO: Use MFAM after porting below analyses.
422 // LiveVariables *LiveVars;
423 // LiveIntervals *LiveInts;
424 // LiveStacks *LiveStks;
425 // SlotIndexes *Indexes;
426 MachineVerifier(nullptr, Banner.c_str(), &errs()).verify(MF);
427}
428
429bool MachineFunction::verify(Pass *p, const char *Banner, raw_ostream *OS,
430 bool AbortOnError) const {
431 return MachineVerifier(p, Banner, OS, AbortOnError).verify(*this);
432}
433
                             // NOTE(review): the first signature line of
                             // this MachineFunction::verify overload (taking
                             // LiveIntervals/SlotIndexes) is elided in this
                             // excerpt.
                             const char *Banner, raw_ostream *OS,
                             bool AbortOnError) const {
  return MachineVerifier(Banner, /*LiveVars=*/nullptr, LiveInts,
                         /*LiveStks=*/nullptr, Indexes, OS, AbortOnError)
      .verify(*this);
}
441
// Assert (debug builds only) that SlotIndexes' per-MBB index list is sorted.
void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  // NOTE(review): the loop header's first line (declaring 'Last' and the
  // MBBIndexIterator 'I') is elided in this excerpt.
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}
454
// Cross-check declared MachineFunctionProperties against the actual state.
void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  // NOTE(review): the property enum line (NoVRegs) between the next two
  // lines is elided in this excerpt.
  if (MF.getProperties().hasProperty(
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}
464
/// Top-level driver: caches target info, picks up whichever liveness
/// analyses are available, and walks every block / bundle / instruction /
/// operand of \p MF. \returns true if no problems were found.
bool MachineVerifier::verify(const MachineFunction &MF) {
  this->MF = &MF;
  TM = &MF.getTarget();
  // NOTE(review): the lines initializing TII and TRI from MF.getSubtarget()
  // are elided in this excerpt.
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  // NOTE(review): the property enum line (FailedISel) is elided here.
  const bool isFunctionFailedISel = MF.getProperties().hasProperty(

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return true;

  // Cache frequently queried properties. NOTE(review): the property enum
  // lines (RegBankSelected / Selected / TracksDebugUserValues) are elided in
  // this excerpt.
  isFunctionRegBankSelected = MF.getProperties().hasProperty(
  isFunctionSelected = MF.getProperties().hasProperty(
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(

  // Pick up analyses from the legacy pass manager, if we were given a pass.
  if (PASS) {
    auto *LISWrapper = PASS->getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
    LiveInts = LISWrapper ? &LISWrapper->getLIS() : nullptr;
    // We don't want to verify LiveVariables if LiveIntervals is available.
    auto *LVWrapper = PASS->getAnalysisIfAvailable<LiveVariablesWrapperPass>();
    if (!LiveInts)
      LiveVars = LVWrapper ? &LVWrapper->getLV() : nullptr;
    auto *LSWrapper = PASS->getAnalysisIfAvailable<LiveStacksWrapperLegacy>();
    LiveStks = LSWrapper ? &LSWrapper->getLS() : nullptr;
    auto *SIWrapper = PASS->getAnalysisIfAvailable<SlotIndexesWrapperPass>();
    Indexes = SIWrapper ? &SIWrapper->getSI() : nullptr;
  }
  // Or from the new pass manager's cache.
  if (MFAM) {
    MachineFunction &Func = const_cast<MachineFunction &>(MF);
    LiveInts = MFAM->getCachedResult<LiveIntervalsAnalysis>(Func);
    if (!LiveInts)
      LiveVars = MFAM->getCachedResult<LiveVariablesAnalysis>(Func);
    // TODO: LiveStks = MFAM->getCachedResult<LiveStacksAnalysis>(Func);
    Indexes = MFAM->getCachedResult<SlotIndexesAnalysis>(Func);
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        OS << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return !ReportedErrs.hasError();
}
580
581void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
582 assert(MF);
583 OS << '\n';
584 if (ReportedErrs.increment()) {
585 if (Banner)
586 OS << "# " << Banner << '\n';
587
588 if (LiveInts != nullptr)
589 LiveInts->print(OS);
590 else
591 MF->print(OS, Indexes);
592 }
593
594 OS << "*** Bad machine code: " << msg << " ***\n"
595 << "- function: " << MF->getName() << '\n';
596}
597
598void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
599 assert(MBB);
600 report(msg, MBB->getParent());
601 OS << "- basic block: " << printMBBReference(*MBB) << ' ' << MBB->getName()
602 << " (" << (const void *)MBB << ')';
603 if (Indexes)
604 OS << " [" << Indexes->getMBBStartIdx(MBB) << ';'
605 << Indexes->getMBBEndIdx(MBB) << ')';
606 OS << '\n';
607}
608
609void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
610 assert(MI);
611 report(msg, MI->getParent());
612 OS << "- instruction: ";
613 if (Indexes && Indexes->hasIndex(*MI))
614 OS << Indexes->getInstructionIndex(*MI) << '\t';
615 MI->print(OS, /*IsStandalone=*/true);
616}
617
618void MachineVerifier::report(const char *msg, const MachineOperand *MO,
619 unsigned MONum, LLT MOVRegType) {
620 assert(MO);
621 report(msg, MO->getParent());
622 OS << "- operand " << MONum << ": ";
623 MO->print(OS, MOVRegType, TRI);
624 OS << '\n';
625}
626
627void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
628 report(Msg.str().c_str(), MI);
629}
630
631void MachineVerifier::report_context(SlotIndex Pos) const {
632 OS << "- at: " << Pos << '\n';
633}
634
635void MachineVerifier::report_context(const LiveInterval &LI) const {
636 OS << "- interval: " << LI << '\n';
637}
638
639void MachineVerifier::report_context(const LiveRange &LR,
640 VirtRegOrUnit VRegOrUnit,
641 LaneBitmask LaneMask) const {
642 report_context_liverange(LR);
643 report_context_vreg_regunit(VRegOrUnit);
644 if (LaneMask.any())
645 report_context_lanemask(LaneMask);
646}
647
648void MachineVerifier::report_context(const LiveRange::Segment &S) const {
649 OS << "- segment: " << S << '\n';
650}
651
652void MachineVerifier::report_context(const VNInfo &VNI) const {
653 OS << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
654}
655
656void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
657 OS << "- liverange: " << LR << '\n';
658}
659
660void MachineVerifier::report_context(MCPhysReg PReg) const {
661 OS << "- p. register: " << printReg(PReg, TRI) << '\n';
662}
663
664void MachineVerifier::report_context_vreg(Register VReg) const {
665 OS << "- v. register: " << printReg(VReg, TRI) << '\n';
666}
667
668void MachineVerifier::report_context_vreg_regunit(
669 VirtRegOrUnit VRegOrUnit) const {
670 if (VRegOrUnit.isVirtualReg()) {
671 report_context_vreg(VRegOrUnit.asVirtualReg());
672 } else {
673 OS << "- regunit: " << printRegUnit(VRegOrUnit.asMCRegUnit(), TRI)
674 << '\n';
675 }
676}
677
678void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
679 OS << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
680}
681
682void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
683 BBInfo &MInfo = MBBInfoMap[MBB];
684 if (!MInfo.reachable) {
685 MInfo.reachable = true;
686 for (const MachineBasicBlock *Succ : MBB->successors())
687 markReachable(Succ);
688 }
689}
690
691void MachineVerifier::visitMachineFunctionBefore() {
692 lastIndex = SlotIndex();
693 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
694 : TRI->getReservedRegs(*MF);
695
696 if (!MF->empty())
697 markReachable(&MF->front());
698
699 // Build a set of the basic blocks in the function.
700 FunctionBlocks.clear();
701 for (const auto &MBB : *MF) {
702 FunctionBlocks.insert(&MBB);
703 BBInfo &MInfo = MBBInfoMap[&MBB];
704
705 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
706 if (MInfo.Preds.size() != MBB.pred_size())
707 report("MBB has duplicate entries in its predecessor list.", &MBB);
708
709 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
710 if (MInfo.Succs.size() != MBB.succ_size())
711 report("MBB has duplicate entries in its successor list.", &MBB);
712 }
713
714 // Check that the register use lists are sane.
715 MRI->verifyUseLists();
716
717 if (!MF->empty()) {
718 verifyStackFrame();
719 verifyStackProtector();
720 }
721}
722
/// Per-block checks run before visiting the block's instructions: live-in
/// sanity, CFG pred/succ consistency, landing-pad counts, and agreement
/// between analyzeBranch's answers and the actual successor list. Also
/// seeds regsLive from the live-in list and the pristine registers.
void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    // NOTE(review): the final condition line of this 'if' (checking
    // inlineasm-br indirect targets, per the message below) is elided in
    // this excerpt.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  // NOTE(review): the inner condition of this check (looking up the
  // address-taken IR block) is elided in this excerpt.
  if (MBB->isIRBlockAddressTaken()) {
    report("ir-block-address-taken is associated with basic block not used by "
           "a blockaddress.",
           MBB);
  }

  // Count the number of landing pad successors.
  // NOTE(review): the declaration of the 'LandingPadSuccs' set is elided in
  // this excerpt.
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the predecessor list of the successor "
         << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      OS << "MBB is not in the successor list of the predecessor "
         << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  // NOTE(review): one condition line of this check (between 'AsmInfo &&'
  // and 'BB && ...') is elided in this excerpt.
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there several more conditions to check.
  // NOTE(review): the declaration of 'Cond' (the branch-condition operand
  // vector) is elided in this excerpt.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    // NOTE(review): the declaration of the iterator 'MBBI' (next block in
    // function layout) is elided in this excerpt.
    if (!Cond.empty() && !FBB) {
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  // Seed regsLive from the block's live-in list (with all sub-registers).
  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!LI.PhysReg.isPhysical()) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  // Pristine (callee-saved, not-yet-spilled) registers are also live.
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}
922
923// This function gets called for all bundle headers, including normal
924// stand-alone unbundled instructions.
925void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
926 if (Indexes && Indexes->hasIndex(*MI)) {
927 SlotIndex idx = Indexes->getInstructionIndex(*MI);
928 if (!(idx > lastIndex)) {
929 report("Instruction index out of order", MI);
930 OS << "Last instruction was at " << lastIndex << '\n';
931 }
932 lastIndex = idx;
933 }
934
935 // Ensure non-terminators don't follow terminators.
936 if (MI->isTerminator()) {
937 if (!FirstTerminator)
938 FirstTerminator = MI;
939 } else if (FirstTerminator) {
940 // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
941 // precede non-terminators.
942 if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
943 report("Non-terminator instruction after the first terminator", MI);
944 OS << "First terminator was:\t" << *FirstTerminator;
945 }
946 }
947}
948
949// The operands on an INLINEASM instruction must follow a template.
950// Verify that the flag operands make sense.
951void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
952 // The first two operands on INLINEASM are the asm string and global flags.
953 if (MI->getNumOperands() < 2) {
954 report("Too few operands on inline asm", MI);
955 return;
956 }
957 if (!MI->getOperand(0).isSymbol())
958 report("Asm string must be an external symbol", MI);
959 if (!MI->getOperand(1).isImm())
960 report("Asm flags must be an immediate", MI);
961 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
962 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
963 // and Extra_IsConvergent = 32.
964 if (!isUInt<6>(MI->getOperand(1).getImm()))
965 report("Unknown asm flags", &MI->getOperand(1), 1);
966
967 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
968
969 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
970 unsigned NumOps;
971 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
972 const MachineOperand &MO = MI->getOperand(OpNo);
973 // There may be implicit ops after the fixed operands.
974 if (!MO.isImm())
975 break;
976 const InlineAsm::Flag F(MO.getImm());
977 NumOps = 1 + F.getNumOperandRegisters();
978 }
979
980 if (OpNo > MI->getNumOperands())
981 report("Missing operands in last group", MI);
982
983 // An optional MDNode follows the groups.
984 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
985 ++OpNo;
986
987 // All trailing operands must be implicit registers.
988 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
989 const MachineOperand &MO = MI->getOperand(OpNo);
990 if (!MO.isReg() || !MO.isImplicit())
991 report("Expected implicit register after groups", &MO, OpNo);
992 }
993
994 if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
995 const MachineBasicBlock *MBB = MI->getParent();
996
997 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
998 i != e; ++i) {
999 const MachineOperand &MO = MI->getOperand(i);
1000
1001 if (!MO.isMBB())
1002 continue;
1003
1004 // Check the successor & predecessor lists look ok, assume they are
1005 // not. Find the indirect target without going through the successors.
1006 const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
1007 if (!IndirectTargetMBB) {
1008 report("INLINEASM_BR indirect target does not exist", &MO, i);
1009 break;
1010 }
1011
1012 if (!MBB->isSuccessor(IndirectTargetMBB))
1013 report("INLINEASM_BR indirect target missing from successor list", &MO,
1014 i);
1015
1016 if (!IndirectTargetMBB->isPredecessor(MBB))
1017 report("INLINEASM_BR indirect target predecessor list missing parent",
1018 &MO, i);
1019 }
1020 }
1021}
1022
1023bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
1024 const MachineRegisterInfo &MRI) {
1025 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
1026 if (!Op.isReg())
1027 return false;
1028 const auto Reg = Op.getReg();
1029 if (Reg.isPhysical())
1030 return false;
1031 return !MRI.getType(Reg).isScalar();
1032 }))
1033 return true;
1034 report("All register operands must have scalar types", &MI);
1035 return false;
1036}
1037
1038/// Check that types are consistent when two operands need to have the same
1039/// number of vector elements.
1040/// \return true if the types are valid.
1041bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
1042 const MachineInstr *MI) {
1043 if (Ty0.isVector() != Ty1.isVector()) {
1044 report("operand types must be all-vector or all-scalar", MI);
1045 // Generally we try to report as many issues as possible at once, but in
1046 // this case it's not clear what should we be comparing the size of the
1047 // scalar with: the size of the whole vector or its lane. Instead of
1048 // making an arbitrary choice and emitting not so helpful message, let's
1049 // avoid the extra noise and stop here.
1050 return false;
1051 }
1052
1053 if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
1054 report("operand types must preserve number of vector elements", MI);
1055 return false;
1056 }
1057
1058 return true;
1059}
1060
// Verify that the side-effect flavor of a G_INTRINSIC* opcode agrees with
// the memory effects declared on the intrinsic itself.
1061 bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
1062   auto Opcode = MI->getOpcode();
       // These two opcodes assert the intrinsic has no side effects; the
       // *_W_SIDE_EFFECTS variants assert the opposite.
1063   bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
1064                        Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
1065   unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
       // Only known intrinsic IDs carry declared attributes we can
       // cross-check. NOTE(review): the statement fetching the intrinsic's
       // attributes (initializing `Attrs`) appears truncated in this listing —
       // confirm against the full source file.
1066   if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1068         MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
       // "Has side effects" here means "may access memory" according to the
       // intrinsic's declared memory effects.
1069     bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
1070     if (NoSideEffects && DeclHasSideEffects) {
1071       report(Twine(TII->getName(Opcode),
1072                    " used with intrinsic that accesses memory"),
1073              MI);
1074       return false;
1075     }
1076     if (!NoSideEffects && !DeclHasSideEffects) {
1077       report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
1078       return false;
1079     }
1080   }
1081
       // Either the flavors matched, or the ID was unknown and no check applies.
1082   return true;
1083 }
1084
// Verify that the convergence flavor of a G_INTRINSIC* opcode agrees with
// the `convergent` attribute declared on the intrinsic itself.
1085 bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
1086   auto Opcode = MI->getOpcode();
       // These two opcodes assert the call is not convergent; the
       // *_CONVERGENT variants assert the opposite.
1087   bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
1088                        Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
1089   unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
       // Only known intrinsic IDs carry declared attributes we can
       // cross-check. NOTE(review): the statement fetching the intrinsic's
       // attributes (initializing `Attrs`) appears truncated in this listing —
       // confirm against the full source file.
1090   if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1092         MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1093     bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
1094     if (NotConvergent && DeclIsConvergent) {
1095       report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
1096              MI);
1097       return false;
1098     }
1099     if (!NotConvergent && !DeclIsConvergent) {
1100       report(
1101           Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
1102           MI);
1103       return false;
1104     }
1105   }
1106
       // Either the flavors matched, or the ID was unknown and no check applies.
1107   return true;
1108 }
1109
1110void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
1111 if (isFunctionSelected)
1112 report("Unexpected generic instruction in a Selected function", MI);
1113
1114 const MCInstrDesc &MCID = MI->getDesc();
1115 unsigned NumOps = MI->getNumOperands();
1116
1117 // Branches must reference a basic block if they are not indirect
1118 if (MI->isBranch() && !MI->isIndirectBranch()) {
1119 bool HasMBB = false;
1120 for (const MachineOperand &Op : MI->operands()) {
1121 if (Op.isMBB()) {
1122 HasMBB = true;
1123 break;
1124 }
1125 }
1126
1127 if (!HasMBB) {
1128 report("Branch instruction is missing a basic block operand or "
1129 "isIndirectBranch property",
1130 MI);
1131 }
1132 }
1133
1134 // Check types.
1136 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
1137 I != E; ++I) {
1138 if (!MCID.operands()[I].isGenericType())
1139 continue;
1140 // Generic instructions specify type equality constraints between some of
1141 // their operands. Make sure these are consistent.
1142 size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
1143 Types.resize(std::max(TypeIdx + 1, Types.size()));
1144
1145 const MachineOperand *MO = &MI->getOperand(I);
1146 if (!MO->isReg()) {
1147 report("generic instruction must use register operands", MI);
1148 continue;
1149 }
1150
1151 LLT OpTy = MRI->getType(MO->getReg());
1152 // Don't report a type mismatch if there is no actual mismatch, only a
1153 // type missing, to reduce noise:
1154 if (OpTy.isValid()) {
1155 // Only the first valid type for a type index will be printed: don't
1156 // overwrite it later so it's always clear which type was expected:
1157 if (!Types[TypeIdx].isValid())
1158 Types[TypeIdx] = OpTy;
1159 else if (Types[TypeIdx] != OpTy)
1160 report("Type mismatch in generic instruction", MO, I, OpTy);
1161 } else {
1162 // Generic instructions must have types attached to their operands.
1163 report("Generic instruction is missing a virtual register type", MO, I);
1164 }
1165 }
1166
1167 // Generic opcodes must not have physical register operands.
1168 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
1169 const MachineOperand *MO = &MI->getOperand(I);
1170 if (MO->isReg() && MO->getReg().isPhysical())
1171 report("Generic instruction cannot have physical register", MO, I);
1172 }
1173
1174 // Avoid out of bounds in checks below. This was already reported earlier.
1175 if (MI->getNumOperands() < MCID.getNumOperands())
1176 return;
1177
1179 if (!TII->verifyInstruction(*MI, ErrorInfo))
1180 report(ErrorInfo.data(), MI);
1181
1182 // Verify properties of various specific instruction types
1183 unsigned Opc = MI->getOpcode();
1184 switch (Opc) {
1185 case TargetOpcode::G_ASSERT_SEXT:
1186 case TargetOpcode::G_ASSERT_ZEXT: {
1187 std::string OpcName =
1188 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
1189 if (!MI->getOperand(2).isImm()) {
1190 report(Twine(OpcName, " expects an immediate operand #2"), MI);
1191 break;
1192 }
1193
1194 Register Dst = MI->getOperand(0).getReg();
1195 Register Src = MI->getOperand(1).getReg();
1196 LLT SrcTy = MRI->getType(Src);
1197 int64_t Imm = MI->getOperand(2).getImm();
1198 if (Imm <= 0) {
1199 report(Twine(OpcName, " size must be >= 1"), MI);
1200 break;
1201 }
1202
1203 if (Imm >= SrcTy.getScalarSizeInBits()) {
1204 report(Twine(OpcName, " size must be less than source bit width"), MI);
1205 break;
1206 }
1207
1208 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
1209 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);
1210
1211 // Allow only the source bank to be set.
1212 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
1213 report(Twine(OpcName, " cannot change register bank"), MI);
1214 break;
1215 }
1216
1217 // Don't allow a class change. Do allow member class->regbank.
1218 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
1219 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
1220 report(
1221 Twine(OpcName, " source and destination register classes must match"),
1222 MI);
1223 break;
1224 }
1225
1226 break;
1227 }
1228
1229 case TargetOpcode::G_CONSTANT:
1230 case TargetOpcode::G_FCONSTANT: {
1231 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1232 if (DstTy.isVector())
1233 report("Instruction cannot use a vector result type", MI);
1234
1235 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1236 if (!MI->getOperand(1).isCImm()) {
1237 report("G_CONSTANT operand must be cimm", MI);
1238 break;
1239 }
1240
1241 const ConstantInt *CI = MI->getOperand(1).getCImm();
1242 if (CI->getBitWidth() != DstTy.getSizeInBits())
1243 report("inconsistent constant size", MI);
1244 } else {
1245 if (!MI->getOperand(1).isFPImm()) {
1246 report("G_FCONSTANT operand must be fpimm", MI);
1247 break;
1248 }
1249 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1250
1252 DstTy.getSizeInBits()) {
1253 report("inconsistent constant size", MI);
1254 }
1255 }
1256
1257 break;
1258 }
1259 case TargetOpcode::G_LOAD:
1260 case TargetOpcode::G_STORE:
1261 case TargetOpcode::G_ZEXTLOAD:
1262 case TargetOpcode::G_SEXTLOAD: {
1263 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1264 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1265 if (!PtrTy.isPointer())
1266 report("Generic memory instruction must access a pointer", MI);
1267
1268 // Generic loads and stores must have a single MachineMemOperand
1269 // describing that access.
1270 if (!MI->hasOneMemOperand()) {
1271 report("Generic instruction accessing memory must have one mem operand",
1272 MI);
1273 } else {
1274 const MachineMemOperand &MMO = **MI->memoperands_begin();
1275 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1276 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1278 ValTy.getSizeInBits()))
1279 report("Generic extload must have a narrower memory type", MI);
1280 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1282 ValTy.getSizeInBytes()))
1283 report("load memory size cannot exceed result size", MI);
1284
1285 if (MMO.getRanges()) {
1286 ConstantInt *i =
1287 mdconst::extract<ConstantInt>(MMO.getRanges()->getOperand(0));
1288 if (i->getIntegerType()->getBitWidth() !=
1289 ValTy.getScalarType().getSizeInBits()) {
1290 report("range is incompatible with the result type", MI);
1291 }
1292 }
1293 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1295 MMO.getSize().getValue()))
1296 report("store memory size cannot exceed value size", MI);
1297 }
1298
1299 const AtomicOrdering Order = MMO.getSuccessOrdering();
1300 if (Opc == TargetOpcode::G_STORE) {
1301 if (Order == AtomicOrdering::Acquire ||
1303 report("atomic store cannot use acquire ordering", MI);
1304
1305 } else {
1306 if (Order == AtomicOrdering::Release ||
1308 report("atomic load cannot use release ordering", MI);
1309 }
1310 }
1311
1312 break;
1313 }
1314 case TargetOpcode::G_PHI: {
1315 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1316 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1317 [this, &DstTy](const MachineOperand &MO) {
1318 if (!MO.isReg())
1319 return true;
1320 LLT Ty = MRI->getType(MO.getReg());
1321 if (!Ty.isValid() || (Ty != DstTy))
1322 return false;
1323 return true;
1324 }))
1325 report("Generic Instruction G_PHI has operands with incompatible/missing "
1326 "types",
1327 MI);
1328 break;
1329 }
1330 case TargetOpcode::G_BITCAST: {
1331 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1332 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1333 if (!DstTy.isValid() || !SrcTy.isValid())
1334 break;
1335
1336 if (SrcTy.isPointer() != DstTy.isPointer())
1337 report("bitcast cannot convert between pointers and other types", MI);
1338
1339 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1340 report("bitcast sizes must match", MI);
1341
1342 if (SrcTy == DstTy)
1343 report("bitcast must change the type", MI);
1344
1345 break;
1346 }
1347 case TargetOpcode::G_INTTOPTR:
1348 case TargetOpcode::G_PTRTOINT:
1349 case TargetOpcode::G_ADDRSPACE_CAST: {
1350 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1351 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1352 if (!DstTy.isValid() || !SrcTy.isValid())
1353 break;
1354
1355 verifyVectorElementMatch(DstTy, SrcTy, MI);
1356
1357 DstTy = DstTy.getScalarType();
1358 SrcTy = SrcTy.getScalarType();
1359
1360 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1361 if (!DstTy.isPointer())
1362 report("inttoptr result type must be a pointer", MI);
1363 if (SrcTy.isPointer())
1364 report("inttoptr source type must not be a pointer", MI);
1365 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1366 if (!SrcTy.isPointer())
1367 report("ptrtoint source type must be a pointer", MI);
1368 if (DstTy.isPointer())
1369 report("ptrtoint result type must not be a pointer", MI);
1370 } else {
1371 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1372 if (!SrcTy.isPointer() || !DstTy.isPointer())
1373 report("addrspacecast types must be pointers", MI);
1374 else {
1375 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1376 report("addrspacecast must convert different address spaces", MI);
1377 }
1378 }
1379
1380 break;
1381 }
1382 case TargetOpcode::G_PTR_ADD: {
1383 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1384 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1385 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1386 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1387 break;
1388
1389 if (!PtrTy.isPointerOrPointerVector())
1390 report("gep first operand must be a pointer", MI);
1391
1392 if (OffsetTy.isPointerOrPointerVector())
1393 report("gep offset operand must not be a pointer", MI);
1394
1395 if (PtrTy.isPointerOrPointerVector()) {
1396 const DataLayout &DL = MF->getDataLayout();
1397 unsigned AS = PtrTy.getAddressSpace();
1398 unsigned IndexSizeInBits = DL.getIndexSize(AS) * 8;
1399 if (OffsetTy.getScalarSizeInBits() != IndexSizeInBits) {
1400 report("gep offset operand must match index size for address space",
1401 MI);
1402 }
1403 }
1404
1405 // TODO: Is the offset allowed to be a scalar with a vector?
1406 break;
1407 }
1408 case TargetOpcode::G_PTRMASK: {
1409 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1410 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1411 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1412 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1413 break;
1414
1415 if (!DstTy.isPointerOrPointerVector())
1416 report("ptrmask result type must be a pointer", MI);
1417
1418 if (!MaskTy.getScalarType().isScalar())
1419 report("ptrmask mask type must be an integer", MI);
1420
1421 verifyVectorElementMatch(DstTy, MaskTy, MI);
1422 break;
1423 }
1424 case TargetOpcode::G_SEXT:
1425 case TargetOpcode::G_ZEXT:
1426 case TargetOpcode::G_ANYEXT:
1427 case TargetOpcode::G_TRUNC:
1428 case TargetOpcode::G_FPEXT:
1429 case TargetOpcode::G_FPTRUNC: {
1430 // Number of operands and presense of types is already checked (and
1431 // reported in case of any issues), so no need to report them again. As
1432 // we're trying to report as many issues as possible at once, however, the
1433 // instructions aren't guaranteed to have the right number of operands or
1434 // types attached to them at this point
1435 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1436 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1437 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1438 if (!DstTy.isValid() || !SrcTy.isValid())
1439 break;
1440
1442 report("Generic extend/truncate can not operate on pointers", MI);
1443
1444 verifyVectorElementMatch(DstTy, SrcTy, MI);
1445
1446 unsigned DstSize = DstTy.getScalarSizeInBits();
1447 unsigned SrcSize = SrcTy.getScalarSizeInBits();
1448 switch (MI->getOpcode()) {
1449 default:
1450 if (DstSize <= SrcSize)
1451 report("Generic extend has destination type no larger than source", MI);
1452 break;
1453 case TargetOpcode::G_TRUNC:
1454 case TargetOpcode::G_FPTRUNC:
1455 if (DstSize >= SrcSize)
1456 report("Generic truncate has destination type no smaller than source",
1457 MI);
1458 break;
1459 }
1460 break;
1461 }
1462 case TargetOpcode::G_SELECT: {
1463 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1464 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1465 if (!SelTy.isValid() || !CondTy.isValid())
1466 break;
1467
1468 // Scalar condition select on a vector is valid.
1469 if (CondTy.isVector())
1470 verifyVectorElementMatch(SelTy, CondTy, MI);
1471 break;
1472 }
1473 case TargetOpcode::G_MERGE_VALUES: {
1474 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1475 // e.g. s2N = MERGE sN, sN
1476 // Merging multiple scalars into a vector is not allowed, should use
1477 // G_BUILD_VECTOR for that.
1478 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1479 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1480 if (DstTy.isVector() || SrcTy.isVector())
1481 report("G_MERGE_VALUES cannot operate on vectors", MI);
1482
1483 const unsigned NumOps = MI->getNumOperands();
1484 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1485 report("G_MERGE_VALUES result size is inconsistent", MI);
1486
1487 for (unsigned I = 2; I != NumOps; ++I) {
1488 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1489 report("G_MERGE_VALUES source types do not match", MI);
1490 }
1491
1492 break;
1493 }
1494 case TargetOpcode::G_UNMERGE_VALUES: {
1495 unsigned NumDsts = MI->getNumOperands() - 1;
1496 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1497 for (unsigned i = 1; i < NumDsts; ++i) {
1498 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
1499 report("G_UNMERGE_VALUES destination types do not match", MI);
1500 break;
1501 }
1502 }
1503
1504 LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
1505 if (DstTy.isVector()) {
1506 // This case is the converse of G_CONCAT_VECTORS.
1507 if (!SrcTy.isVector() ||
1508 (SrcTy.getScalarType() != DstTy.getScalarType() &&
1509 !SrcTy.isPointerVector()) ||
1510 SrcTy.isScalableVector() != DstTy.isScalableVector() ||
1511 SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1512 report("G_UNMERGE_VALUES source operand does not match vector "
1513 "destination operands",
1514 MI);
1515 } else if (SrcTy.isVector()) {
1516 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1517 // mismatched types as long as the total size matches:
1518 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1519 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
1520 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1521 "destination operands",
1522 MI);
1523 } else {
1524 // This case is the converse of G_MERGE_VALUES.
1525 if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
1526 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1527 "destination operands",
1528 MI);
1529 }
1530 }
1531 break;
1532 }
1533 case TargetOpcode::G_BUILD_VECTOR: {
1534 // Source types must be scalars, dest type a vector. Total size of scalars
1535 // must match the dest vector size.
1536 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1537 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1538 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1539 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1540 break;
1541 }
1542
1543 if (DstTy.getElementType() != SrcEltTy)
1544 report("G_BUILD_VECTOR result element type must match source type", MI);
1545
1546 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1547 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1548
1549 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1550 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1551 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1552
1553 break;
1554 }
1555 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1556 // Source types must be scalars, dest type a vector. Scalar types must be
1557 // larger than the dest vector elt type, as this is a truncating operation.
1558 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1559 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1560 if (!DstTy.isVector() || SrcEltTy.isVector())
1561 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1562 MI);
1563 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1564 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1565 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1566 MI);
1567 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1568 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1569 "dest elt type",
1570 MI);
1571 break;
1572 }
1573 case TargetOpcode::G_CONCAT_VECTORS: {
1574 // Source types should be vectors, and total size should match the dest
1575 // vector size.
1576 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1577 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1578 if (!DstTy.isVector() || !SrcTy.isVector())
1579 report("G_CONCAT_VECTOR requires vector source and destination operands",
1580 MI);
1581
1582 if (MI->getNumOperands() < 3)
1583 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1584
1585 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1586 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1587 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1588 if (DstTy.getElementCount() !=
1589 SrcTy.getElementCount() * (MI->getNumOperands() - 1))
1590 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1591 break;
1592 }
1593 case TargetOpcode::G_ICMP:
1594 case TargetOpcode::G_FCMP: {
1595 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1596 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1597
1598 if ((DstTy.isVector() != SrcTy.isVector()) ||
1599 (DstTy.isVector() &&
1600 DstTy.getElementCount() != SrcTy.getElementCount()))
1601 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1602
1603 break;
1604 }
1605 case TargetOpcode::G_SCMP:
1606 case TargetOpcode::G_UCMP: {
1607 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1608 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1609
1610 if (SrcTy.isPointerOrPointerVector()) {
1611 report("Generic scmp/ucmp does not support pointers as operands", MI);
1612 break;
1613 }
1614
1615 if (DstTy.isPointerOrPointerVector()) {
1616 report("Generic scmp/ucmp does not support pointers as a result", MI);
1617 break;
1618 }
1619
1620 if (DstTy.getScalarSizeInBits() < 2) {
1621 report("Result type must be at least 2 bits wide", MI);
1622 break;
1623 }
1624
1625 if ((DstTy.isVector() != SrcTy.isVector()) ||
1626 (DstTy.isVector() &&
1627 DstTy.getElementCount() != SrcTy.getElementCount())) {
1628 report("Generic vector scmp/ucmp must preserve number of lanes", MI);
1629 break;
1630 }
1631
1632 break;
1633 }
1634 case TargetOpcode::G_EXTRACT: {
1635 const MachineOperand &SrcOp = MI->getOperand(1);
1636 if (!SrcOp.isReg()) {
1637 report("extract source must be a register", MI);
1638 break;
1639 }
1640
1641 const MachineOperand &OffsetOp = MI->getOperand(2);
1642 if (!OffsetOp.isImm()) {
1643 report("extract offset must be a constant", MI);
1644 break;
1645 }
1646
1647 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1648 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1649 if (SrcSize == DstSize)
1650 report("extract source must be larger than result", MI);
1651
1652 if (DstSize + OffsetOp.getImm() > SrcSize)
1653 report("extract reads past end of register", MI);
1654 break;
1655 }
1656 case TargetOpcode::G_INSERT: {
1657 const MachineOperand &SrcOp = MI->getOperand(2);
1658 if (!SrcOp.isReg()) {
1659 report("insert source must be a register", MI);
1660 break;
1661 }
1662
1663 const MachineOperand &OffsetOp = MI->getOperand(3);
1664 if (!OffsetOp.isImm()) {
1665 report("insert offset must be a constant", MI);
1666 break;
1667 }
1668
1669 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1670 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1671
1672 if (DstSize <= SrcSize)
1673 report("inserted size must be smaller than total register", MI);
1674
1675 if (SrcSize + OffsetOp.getImm() > DstSize)
1676 report("insert writes past end of register", MI);
1677
1678 break;
1679 }
1680 case TargetOpcode::G_JUMP_TABLE: {
1681 if (!MI->getOperand(1).isJTI())
1682 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1683 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1684 if (!DstTy.isPointer())
1685 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1686 break;
1687 }
1688 case TargetOpcode::G_BRJT: {
1689 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1690 report("G_BRJT src operand 0 must be a pointer type", MI);
1691
1692 if (!MI->getOperand(1).isJTI())
1693 report("G_BRJT src operand 1 must be a jump table index", MI);
1694
1695 const auto &IdxOp = MI->getOperand(2);
1696 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1697 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1698 break;
1699 }
1700 case TargetOpcode::G_INTRINSIC:
1701 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1702 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1703 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1704 // TODO: Should verify number of def and use operands, but the current
1705 // interface requires passing in IR types for mangling.
1706 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1707 if (!IntrIDOp.isIntrinsicID()) {
1708 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1709 break;
1710 }
1711
1712 if (!verifyGIntrinsicSideEffects(MI))
1713 break;
1714 if (!verifyGIntrinsicConvergence(MI))
1715 break;
1716
1717 break;
1718 }
1719 case TargetOpcode::G_SEXT_INREG: {
1720 if (!MI->getOperand(2).isImm()) {
1721 report("G_SEXT_INREG expects an immediate operand #2", MI);
1722 break;
1723 }
1724
1725 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1726 int64_t Imm = MI->getOperand(2).getImm();
1727 if (Imm <= 0)
1728 report("G_SEXT_INREG size must be >= 1", MI);
1729 if (Imm >= SrcTy.getScalarSizeInBits())
1730 report("G_SEXT_INREG size must be less than source bit width", MI);
1731 break;
1732 }
1733 case TargetOpcode::G_BSWAP: {
1734 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1735 if (DstTy.getScalarSizeInBits() % 16 != 0)
1736 report("G_BSWAP size must be a multiple of 16 bits", MI);
1737 break;
1738 }
1739 case TargetOpcode::G_VSCALE: {
1740 if (!MI->getOperand(1).isCImm()) {
1741 report("G_VSCALE operand must be cimm", MI);
1742 break;
1743 }
1744 if (MI->getOperand(1).getCImm()->isZero()) {
1745 report("G_VSCALE immediate cannot be zero", MI);
1746 break;
1747 }
1748 break;
1749 }
1750 case TargetOpcode::G_STEP_VECTOR: {
1751 if (!MI->getOperand(1).isCImm()) {
1752 report("operand must be cimm", MI);
1753 break;
1754 }
1755
1756 if (!MI->getOperand(1).getCImm()->getValue().isStrictlyPositive()) {
1757 report("step must be > 0", MI);
1758 break;
1759 }
1760
1761 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1762 if (!DstTy.isScalableVector()) {
1763 report("Destination type must be a scalable vector", MI);
1764 break;
1765 }
1766
1767 // <vscale x 2 x p0>
1768 if (!DstTy.getElementType().isScalar()) {
1769 report("Destination element type must be scalar", MI);
1770 break;
1771 }
1772
1773 if (MI->getOperand(1).getCImm()->getBitWidth() !=
1775 report("step bitwidth differs from result type element bitwidth", MI);
1776 break;
1777 }
1778 break;
1779 }
1780 case TargetOpcode::G_INSERT_SUBVECTOR: {
1781 const MachineOperand &Src0Op = MI->getOperand(1);
1782 if (!Src0Op.isReg()) {
1783 report("G_INSERT_SUBVECTOR first source must be a register", MI);
1784 break;
1785 }
1786
1787 const MachineOperand &Src1Op = MI->getOperand(2);
1788 if (!Src1Op.isReg()) {
1789 report("G_INSERT_SUBVECTOR second source must be a register", MI);
1790 break;
1791 }
1792
1793 const MachineOperand &IndexOp = MI->getOperand(3);
1794 if (!IndexOp.isImm()) {
1795 report("G_INSERT_SUBVECTOR index must be an immediate", MI);
1796 break;
1797 }
1798
1799 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1800 LLT Src1Ty = MRI->getType(Src1Op.getReg());
1801
1802 if (!DstTy.isVector()) {
1803 report("Destination type must be a vector", MI);
1804 break;
1805 }
1806
1807 if (!Src1Ty.isVector()) {
1808 report("Second source must be a vector", MI);
1809 break;
1810 }
1811
1812 if (DstTy.getElementType() != Src1Ty.getElementType()) {
1813 report("Element type of vectors must be the same", MI);
1814 break;
1815 }
1816
1817 if (Src1Ty.isScalable() != DstTy.isScalable()) {
1818 report("Vector types must both be fixed or both be scalable", MI);
1819 break;
1820 }
1821
1823 DstTy.getElementCount())) {
1824 report("Second source must be smaller than destination vector", MI);
1825 break;
1826 }
1827
1828 uint64_t Idx = IndexOp.getImm();
1829 uint64_t Src1MinLen = Src1Ty.getElementCount().getKnownMinValue();
1830 if (IndexOp.getImm() % Src1MinLen != 0) {
1831 report("Index must be a multiple of the second source vector's "
1832 "minimum vector length",
1833 MI);
1834 break;
1835 }
1836
1837 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1838 if (Idx >= DstMinLen || Idx + Src1MinLen > DstMinLen) {
1839 report("Subvector type and index must not cause insert to overrun the "
1840 "vector being inserted into",
1841 MI);
1842 break;
1843 }
1844
1845 break;
1846 }
1847 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1848 const MachineOperand &SrcOp = MI->getOperand(1);
1849 if (!SrcOp.isReg()) {
1850 report("G_EXTRACT_SUBVECTOR first source must be a register", MI);
1851 break;
1852 }
1853
1854 const MachineOperand &IndexOp = MI->getOperand(2);
1855 if (!IndexOp.isImm()) {
1856 report("G_EXTRACT_SUBVECTOR index must be an immediate", MI);
1857 break;
1858 }
1859
1860 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1861 LLT SrcTy = MRI->getType(SrcOp.getReg());
1862
1863 if (!DstTy.isVector()) {
1864 report("Destination type must be a vector", MI);
1865 break;
1866 }
1867
1868 if (!SrcTy.isVector()) {
1869 report("Source must be a vector", MI);
1870 break;
1871 }
1872
1873 if (DstTy.getElementType() != SrcTy.getElementType()) {
1874 report("Element type of vectors must be the same", MI);
1875 break;
1876 }
1877
1878 if (SrcTy.isScalable() != DstTy.isScalable()) {
1879 report("Vector types must both be fixed or both be scalable", MI);
1880 break;
1881 }
1882
1884 SrcTy.getElementCount())) {
1885 report("Destination vector must be smaller than source vector", MI);
1886 break;
1887 }
1888
1889 uint64_t Idx = IndexOp.getImm();
1890 uint64_t DstMinLen = DstTy.getElementCount().getKnownMinValue();
1891 if (Idx % DstMinLen != 0) {
1892 report("Index must be a multiple of the destination vector's minimum "
1893 "vector length",
1894 MI);
1895 break;
1896 }
1897
1898 uint64_t SrcMinLen = SrcTy.getElementCount().getKnownMinValue();
1899 if (Idx >= SrcMinLen || Idx + DstMinLen > SrcMinLen) {
1900 report("Destination type and index must not cause extract to overrun the "
1901 "source vector",
1902 MI);
1903 break;
1904 }
1905
1906 break;
1907 }
1908 case TargetOpcode::G_SHUFFLE_VECTOR: {
1909 const MachineOperand &MaskOp = MI->getOperand(3);
1910 if (!MaskOp.isShuffleMask()) {
1911 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1912 break;
1913 }
1914
1915 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1916 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1917 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1918
1919 if (Src0Ty != Src1Ty)
1920 report("Source operands must be the same type", MI);
1921
1922 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1923 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1924
1925 // Don't check that all operands are vector because scalars are used in
1926 // place of 1 element vectors.
1927 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1928 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1929
1930 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1931
1932 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1933 report("Wrong result type for shufflemask", MI);
1934
1935 for (int Idx : MaskIdxes) {
1936 if (Idx < 0)
1937 continue;
1938
1939 if (Idx >= 2 * SrcNumElts)
1940 report("Out of bounds shuffle index", MI);
1941 }
1942
1943 break;
1944 }
1945
1946 case TargetOpcode::G_SPLAT_VECTOR: {
1947 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1948 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1949
1950 if (!DstTy.isScalableVector()) {
1951 report("Destination type must be a scalable vector", MI);
1952 break;
1953 }
1954
1955 if (!SrcTy.isScalar() && !SrcTy.isPointer()) {
1956 report("Source type must be a scalar or pointer", MI);
1957 break;
1958 }
1959
1961 SrcTy.getSizeInBits())) {
1962 report("Element type of the destination must be the same size or smaller "
1963 "than the source type",
1964 MI);
1965 break;
1966 }
1967
1968 break;
1969 }
1970 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1971 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1972 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1973 LLT IdxTy = MRI->getType(MI->getOperand(2).getReg());
1974
1975 if (!DstTy.isScalar() && !DstTy.isPointer()) {
1976 report("Destination type must be a scalar or pointer", MI);
1977 break;
1978 }
1979
1980 if (!SrcTy.isVector()) {
1981 report("First source must be a vector", MI);
1982 break;
1983 }
1984
1985 auto TLI = MF->getSubtarget().getTargetLowering();
1986 if (IdxTy.getSizeInBits() !=
1987 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
1988 report("Index type must match VectorIdxTy", MI);
1989 break;
1990 }
1991
1992 break;
1993 }
1994 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1995 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1996 LLT VecTy = MRI->getType(MI->getOperand(1).getReg());
1997 LLT ScaTy = MRI->getType(MI->getOperand(2).getReg());
1998 LLT IdxTy = MRI->getType(MI->getOperand(3).getReg());
1999
2000 if (!DstTy.isVector()) {
2001 report("Destination type must be a vector", MI);
2002 break;
2003 }
2004
2005 if (VecTy != DstTy) {
2006 report("Destination type and vector type must match", MI);
2007 break;
2008 }
2009
2010 if (!ScaTy.isScalar() && !ScaTy.isPointer()) {
2011 report("Inserted element must be a scalar or pointer", MI);
2012 break;
2013 }
2014
2015 auto TLI = MF->getSubtarget().getTargetLowering();
2016 if (IdxTy.getSizeInBits() !=
2017 TLI->getVectorIdxTy(MF->getDataLayout()).getFixedSizeInBits()) {
2018 report("Index type must match VectorIdxTy", MI);
2019 break;
2020 }
2021
2022 break;
2023 }
2024 case TargetOpcode::G_DYN_STACKALLOC: {
2025 const MachineOperand &DstOp = MI->getOperand(0);
2026 const MachineOperand &AllocOp = MI->getOperand(1);
2027 const MachineOperand &AlignOp = MI->getOperand(2);
2028
2029 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
2030 report("dst operand 0 must be a pointer type", MI);
2031 break;
2032 }
2033
2034 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
2035 report("src operand 1 must be a scalar reg type", MI);
2036 break;
2037 }
2038
2039 if (!AlignOp.isImm()) {
2040 report("src operand 2 must be an immediate type", MI);
2041 break;
2042 }
2043 break;
2044 }
2045 case TargetOpcode::G_MEMCPY_INLINE:
2046 case TargetOpcode::G_MEMCPY:
2047 case TargetOpcode::G_MEMMOVE: {
2048 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2049 if (MMOs.size() != 2) {
2050 report("memcpy/memmove must have 2 memory operands", MI);
2051 break;
2052 }
2053
2054 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
2055 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
2056 report("wrong memory operand types", MI);
2057 break;
2058 }
2059
2060 if (MMOs[0]->getSize() != MMOs[1]->getSize())
2061 report("inconsistent memory operand sizes", MI);
2062
2063 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2064 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
2065
2066 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
2067 report("memory instruction operand must be a pointer", MI);
2068 break;
2069 }
2070
2071 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2072 report("inconsistent store address space", MI);
2073 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
2074 report("inconsistent load address space", MI);
2075
2076 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
2077 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
2078 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
2079
2080 break;
2081 }
2082 case TargetOpcode::G_BZERO:
2083 case TargetOpcode::G_MEMSET: {
2084 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
2085 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
2086 if (MMOs.size() != 1) {
2087 report(Twine(Name, " must have 1 memory operand"), MI);
2088 break;
2089 }
2090
2091 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
2092 report(Twine(Name, " memory operand must be a store"), MI);
2093 break;
2094 }
2095
2096 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
2097 if (!DstPtrTy.isPointer()) {
2098 report(Twine(Name, " operand must be a pointer"), MI);
2099 break;
2100 }
2101
2102 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
2103 report("inconsistent " + Twine(Name, " address space"), MI);
2104
2105 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
2106 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
2107 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
2108
2109 break;
2110 }
2111 case TargetOpcode::G_UBSANTRAP: {
2112 const MachineOperand &KindOp = MI->getOperand(0);
2113 if (!MI->getOperand(0).isImm()) {
2114 report("Crash kind must be an immediate", &KindOp, 0);
2115 break;
2116 }
2117 int64_t Kind = MI->getOperand(0).getImm();
2118 if (!isInt<8>(Kind))
2119 report("Crash kind must be 8 bit wide", &KindOp, 0);
2120 break;
2121 }
2122 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
2123 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
2124 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2125 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2126 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2127 if (!DstTy.isScalar())
2128 report("Vector reduction requires a scalar destination type", MI);
2129 if (!Src1Ty.isScalar())
2130 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
2131 if (!Src2Ty.isVector())
2132 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
2133 break;
2134 }
2135 case TargetOpcode::G_VECREDUCE_FADD:
2136 case TargetOpcode::G_VECREDUCE_FMUL:
2137 case TargetOpcode::G_VECREDUCE_FMAX:
2138 case TargetOpcode::G_VECREDUCE_FMIN:
2139 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
2140 case TargetOpcode::G_VECREDUCE_FMINIMUM:
2141 case TargetOpcode::G_VECREDUCE_ADD:
2142 case TargetOpcode::G_VECREDUCE_MUL:
2143 case TargetOpcode::G_VECREDUCE_AND:
2144 case TargetOpcode::G_VECREDUCE_OR:
2145 case TargetOpcode::G_VECREDUCE_XOR:
2146 case TargetOpcode::G_VECREDUCE_SMAX:
2147 case TargetOpcode::G_VECREDUCE_SMIN:
2148 case TargetOpcode::G_VECREDUCE_UMAX:
2149 case TargetOpcode::G_VECREDUCE_UMIN: {
2150 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2151 if (!DstTy.isScalar())
2152 report("Vector reduction requires a scalar destination type", MI);
2153 break;
2154 }
2155
2156 case TargetOpcode::G_SBFX:
2157 case TargetOpcode::G_UBFX: {
2158 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2159 if (DstTy.isVector()) {
2160 report("Bitfield extraction is not supported on vectors", MI);
2161 break;
2162 }
2163 break;
2164 }
2165 case TargetOpcode::G_SHL:
2166 case TargetOpcode::G_LSHR:
2167 case TargetOpcode::G_ASHR:
2168 case TargetOpcode::G_ROTR:
2169 case TargetOpcode::G_ROTL: {
2170 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
2171 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
2172 if (Src1Ty.isVector() != Src2Ty.isVector()) {
2173 report("Shifts and rotates require operands to be either all scalars or "
2174 "all vectors",
2175 MI);
2176 break;
2177 }
2178 break;
2179 }
2180 case TargetOpcode::G_LLROUND:
2181 case TargetOpcode::G_LROUND: {
2182 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
2183 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2184 if (!DstTy.isValid() || !SrcTy.isValid())
2185 break;
2186 if (SrcTy.isPointer() || DstTy.isPointer()) {
2187 StringRef Op = SrcTy.isPointer() ? "Source" : "Destination";
2188 report(Twine(Op, " operand must not be a pointer type"), MI);
2189 } else if (SrcTy.isScalar()) {
2190 verifyAllRegOpsScalar(*MI, *MRI);
2191 break;
2192 } else if (SrcTy.isVector()) {
2193 verifyVectorElementMatch(SrcTy, DstTy, MI);
2194 break;
2195 }
2196 break;
2197 }
2198 case TargetOpcode::G_IS_FPCLASS: {
2199 LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
2200 LLT DestEltTy = DestTy.getScalarType();
2201 if (!DestEltTy.isScalar()) {
2202 report("Destination must be a scalar or vector of scalars", MI);
2203 break;
2204 }
2205 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
2206 LLT SrcEltTy = SrcTy.getScalarType();
2207 if (!SrcEltTy.isScalar()) {
2208 report("Source must be a scalar or vector of scalars", MI);
2209 break;
2210 }
2211 if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
2212 break;
2213 const MachineOperand &TestMO = MI->getOperand(2);
2214 if (!TestMO.isImm()) {
2215 report("floating-point class set (operand 2) must be an immediate", MI);
2216 break;
2217 }
2218 int64_t Test = TestMO.getImm();
2219 if (Test < 0 || Test > fcAllFlags) {
2220 report("Incorrect floating-point class set (operand 2)", MI);
2221 break;
2222 }
2223 break;
2224 }
2225 case TargetOpcode::G_PREFETCH: {
2226 const MachineOperand &AddrOp = MI->getOperand(0);
2227 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
2228 report("addr operand must be a pointer", &AddrOp, 0);
2229 break;
2230 }
2231 const MachineOperand &RWOp = MI->getOperand(1);
2232 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
2233 report("rw operand must be an immediate 0-1", &RWOp, 1);
2234 break;
2235 }
2236 const MachineOperand &LocalityOp = MI->getOperand(2);
2237 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
2238 report("locality operand must be an immediate 0-3", &LocalityOp, 2);
2239 break;
2240 }
2241 const MachineOperand &CacheTypeOp = MI->getOperand(3);
2242 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
2243 report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
2244 break;
2245 }
2246 break;
2247 }
2248 case TargetOpcode::G_ASSERT_ALIGN: {
2249 if (MI->getOperand(2).getImm() < 1)
2250 report("alignment immediate must be >= 1", MI);
2251 break;
2252 }
2253 case TargetOpcode::G_CONSTANT_POOL: {
2254 if (!MI->getOperand(1).isCPI())
2255 report("Src operand 1 must be a constant pool index", MI);
2256 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
2257 report("Dst operand 0 must be a pointer", MI);
2258 break;
2259 }
2260 case TargetOpcode::G_PTRAUTH_GLOBAL_VALUE: {
2261 const MachineOperand &AddrOp = MI->getOperand(1);
2262 if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer())
2263 report("addr operand must be a pointer", &AddrOp, 1);
2264 break;
2265 }
2266 default:
2267 break;
2268 }
2269}
2270
2271void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
2272 const MCInstrDesc &MCID = MI->getDesc();
2273 if (MI->getNumOperands() < MCID.getNumOperands()) {
2274 report("Too few operands", MI);
2275 OS << MCID.getNumOperands() << " operands expected, but "
2276 << MI->getNumOperands() << " given.\n";
2277 }
2278
2279 if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
2280 report("NoConvergent flag expected only on convergent instructions.", MI);
2281
2282 if (MI->isPHI()) {
2283 if (MF->getProperties().hasProperty(
2285 report("Found PHI instruction with NoPHIs property set", MI);
2286
2287 if (FirstNonPHI)
2288 report("Found PHI instruction after non-PHI", MI);
2289 } else if (FirstNonPHI == nullptr)
2290 FirstNonPHI = MI;
2291
2292 // Check the tied operands.
2293 if (MI->isInlineAsm())
2294 verifyInlineAsm(MI);
2295
2296 // Check that unspillable terminators define a reg and have at most one use.
2297 if (TII->isUnspillableTerminator(MI)) {
2298 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
2299 report("Unspillable Terminator does not define a reg", MI);
2300 Register Def = MI->getOperand(0).getReg();
2301 if (Def.isVirtual() &&
2302 !MF->getProperties().hasProperty(
2304 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
2305 report("Unspillable Terminator expected to have at most one use!", MI);
2306 }
2307
2308 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
2309 // DBG_VALUEs: these are convenient to use in tests, but should never get
2310 // generated.
2311 if (MI->isDebugValue() && MI->getNumOperands() == 4)
2312 if (!MI->getDebugLoc())
2313 report("Missing DebugLoc for debug instruction", MI);
2314
2315 // Meta instructions should never be the subject of debug value tracking,
2316 // they don't create a value in the output program at all.
2317 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
2318 report("Metadata instruction should not have a value tracking number", MI);
2319
2320 // Check the MachineMemOperands for basic consistency.
2321 for (MachineMemOperand *Op : MI->memoperands()) {
2322 if (Op->isLoad() && !MI->mayLoad())
2323 report("Missing mayLoad flag", MI);
2324 if (Op->isStore() && !MI->mayStore())
2325 report("Missing mayStore flag", MI);
2326 }
2327
2328 // Debug values must not have a slot index.
2329 // Other instructions must have one, unless they are inside a bundle.
2330 if (LiveInts) {
2331 bool mapped = !LiveInts->isNotInMIMap(*MI);
2332 if (MI->isDebugOrPseudoInstr()) {
2333 if (mapped)
2334 report("Debug instruction has a slot index", MI);
2335 } else if (MI->isInsideBundle()) {
2336 if (mapped)
2337 report("Instruction inside bundle has a slot index", MI);
2338 } else {
2339 if (!mapped)
2340 report("Missing slot index", MI);
2341 }
2342 }
2343
2344 unsigned Opc = MCID.getOpcode();
2346 verifyPreISelGenericInstruction(MI);
2347 return;
2348 }
2349
2351 if (!TII->verifyInstruction(*MI, ErrorInfo))
2352 report(ErrorInfo.data(), MI);
2353
2354 // Verify properties of various specific instruction types
2355 switch (MI->getOpcode()) {
2356 case TargetOpcode::COPY: {
2357 const MachineOperand &DstOp = MI->getOperand(0);
2358 const MachineOperand &SrcOp = MI->getOperand(1);
2359 const Register SrcReg = SrcOp.getReg();
2360 const Register DstReg = DstOp.getReg();
2361
2362 LLT DstTy = MRI->getType(DstReg);
2363 LLT SrcTy = MRI->getType(SrcReg);
2364 if (SrcTy.isValid() && DstTy.isValid()) {
2365 // If both types are valid, check that the types are the same.
2366 if (SrcTy != DstTy) {
2367 report("Copy Instruction is illegal with mismatching types", MI);
2368 OS << "Def = " << DstTy << ", Src = " << SrcTy << '\n';
2369 }
2370
2371 break;
2372 }
2373
2374 if (!SrcTy.isValid() && !DstTy.isValid())
2375 break;
2376
2377 // If we have only one valid type, this is likely a copy between a virtual
2378 // and physical register.
2379 TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
2380 TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
2381 if (SrcReg.isPhysical() && DstTy.isValid()) {
2382 const TargetRegisterClass *SrcRC =
2383 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
2384 if (SrcRC)
2385 SrcSize = TRI->getRegSizeInBits(*SrcRC);
2386 }
2387
2388 if (DstReg.isPhysical() && SrcTy.isValid()) {
2389 const TargetRegisterClass *DstRC =
2390 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
2391 if (DstRC)
2392 DstSize = TRI->getRegSizeInBits(*DstRC);
2393 }
2394
2395 // The next two checks allow COPY between physical and virtual registers,
2396 // when the virtual register has a scalable size and the physical register
2397 // has a fixed size. These checks allow COPY between *potentialy* mismatched
2398 // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
2399 // be able to resolve a fixed size for the scalable vector, and at that
2400 // point this function will know for sure whether the sizes are mismatched
2401 // and correctly report a size mismatch.
2402 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
2403 !SrcSize.isScalable())
2404 break;
2405 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
2406 !DstSize.isScalable())
2407 break;
2408
2409 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
2410 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
2411 report("Copy Instruction is illegal with mismatching sizes", MI);
2412 OS << "Def Size = " << DstSize << ", Src Size = " << SrcSize << '\n';
2413 }
2414 }
2415 break;
2416 }
2417 case TargetOpcode::STATEPOINT: {
2418 StatepointOpers SO(MI);
2419 if (!MI->getOperand(SO.getIDPos()).isImm() ||
2420 !MI->getOperand(SO.getNBytesPos()).isImm() ||
2421 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
2422 report("meta operands to STATEPOINT not constant!", MI);
2423 break;
2424 }
2425
2426 auto VerifyStackMapConstant = [&](unsigned Offset) {
2427 if (Offset >= MI->getNumOperands()) {
2428 report("stack map constant to STATEPOINT is out of range!", MI);
2429 return;
2430 }
2431 if (!MI->getOperand(Offset - 1).isImm() ||
2432 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
2433 !MI->getOperand(Offset).isImm())
2434 report("stack map constant to STATEPOINT not well formed!", MI);
2435 };
2436 VerifyStackMapConstant(SO.getCCIdx());
2437 VerifyStackMapConstant(SO.getFlagsIdx());
2438 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2439 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2440 VerifyStackMapConstant(SO.getNumAllocaIdx());
2441 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2442
2443 // Verify that all explicit statepoint defs are tied to gc operands as
2444 // they are expected to be a relocation of gc operands.
2445 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2446 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2447 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2448 unsigned UseOpIdx;
2449 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
2450 report("STATEPOINT defs expected to be tied", MI);
2451 break;
2452 }
2453 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2454 report("STATEPOINT def tied to non-gc operand", MI);
2455 break;
2456 }
2457 }
2458
2459 // TODO: verify we have properly encoded deopt arguments
2460 } break;
2461 case TargetOpcode::INSERT_SUBREG: {
2462 unsigned InsertedSize;
2463 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
2464 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
2465 else
2466 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
2467 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
2468 if (SubRegSize < InsertedSize) {
2469 report("INSERT_SUBREG expected inserted value to have equal or lesser "
2470 "size than the subreg it was inserted into", MI);
2471 break;
2472 }
2473 } break;
2474 case TargetOpcode::REG_SEQUENCE: {
2475 unsigned NumOps = MI->getNumOperands();
2476 if (!(NumOps & 1)) {
2477 report("Invalid number of operands for REG_SEQUENCE", MI);
2478 break;
2479 }
2480
2481 for (unsigned I = 1; I != NumOps; I += 2) {
2482 const MachineOperand &RegOp = MI->getOperand(I);
2483 const MachineOperand &SubRegOp = MI->getOperand(I + 1);
2484
2485 if (!RegOp.isReg())
2486 report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
2487
2488 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2489 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2490 report("Invalid subregister index operand for REG_SEQUENCE",
2491 &SubRegOp, I + 1);
2492 }
2493 }
2494
2495 Register DstReg = MI->getOperand(0).getReg();
2496 if (DstReg.isPhysical())
2497 report("REG_SEQUENCE does not support physical register results", MI);
2498
2499 if (MI->getOperand(0).getSubReg())
2500 report("Invalid subreg result for REG_SEQUENCE", MI);
2501
2502 break;
2503 }
2504 }
2505}
2506
2507void
2508MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2509 const MachineInstr *MI = MO->getParent();
2510 const MCInstrDesc &MCID = MI->getDesc();
2511 unsigned NumDefs = MCID.getNumDefs();
2512 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2513 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2514
2515 // The first MCID.NumDefs operands must be explicit register defines
2516 if (MONum < NumDefs) {
2517 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2518 if (!MO->isReg())
2519 report("Explicit definition must be a register", MO, MONum);
2520 else if (!MO->isDef() && !MCOI.isOptionalDef())
2521 report("Explicit definition marked as use", MO, MONum);
2522 else if (MO->isImplicit())
2523 report("Explicit definition marked as implicit", MO, MONum);
2524 } else if (MONum < MCID.getNumOperands()) {
2525 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2526 // Don't check if it's the last operand in a variadic instruction. See,
2527 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2528 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2529 if (!IsOptional) {
2530 if (MO->isReg()) {
2531 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2532 report("Explicit operand marked as def", MO, MONum);
2533 if (MO->isImplicit())
2534 report("Explicit operand marked as implicit", MO, MONum);
2535 }
2536
2537 // Check that an instruction has register operands only as expected.
2538 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2539 !MO->isReg() && !MO->isFI())
2540 report("Expected a register operand.", MO, MONum);
2541 if (MO->isReg()) {
2544 !TII->isPCRelRegisterOperandLegal(*MO)))
2545 report("Expected a non-register operand.", MO, MONum);
2546 }
2547 }
2548
2549 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2550 if (TiedTo != -1) {
2551 if (!MO->isReg())
2552 report("Tied use must be a register", MO, MONum);
2553 else if (!MO->isTied())
2554 report("Operand should be tied", MO, MONum);
2555 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2556 report("Tied def doesn't match MCInstrDesc", MO, MONum);
2557 else if (MO->getReg().isPhysical()) {
2558 const MachineOperand &MOTied = MI->getOperand(TiedTo);
2559 if (!MOTied.isReg())
2560 report("Tied counterpart must be a register", &MOTied, TiedTo);
2561 else if (MOTied.getReg().isPhysical() &&
2562 MO->getReg() != MOTied.getReg())
2563 report("Tied physical registers must match.", &MOTied, TiedTo);
2564 }
2565 } else if (MO->isReg() && MO->isTied())
2566 report("Explicit operand should not be tied", MO, MONum);
2567 } else if (!MI->isVariadic()) {
2568 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2569 if (!MO->isValidExcessOperand())
2570 report("Extra explicit operand on non-variadic instruction", MO, MONum);
2571 }
2572
2573 switch (MO->getType()) {
2575 // Verify debug flag on debug instructions. Check this first because reg0
2576 // indicates an undefined debug value.
2577 if (MI->isDebugInstr() && MO->isUse()) {
2578 if (!MO->isDebug())
2579 report("Register operand must be marked debug", MO, MONum);
2580 } else if (MO->isDebug()) {
2581 report("Register operand must not be marked debug", MO, MONum);
2582 }
2583
2584 const Register Reg = MO->getReg();
2585 if (!Reg)
2586 return;
2587 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2588 checkLiveness(MO, MONum);
2589
2590 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2591 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2592 report("Undef virtual register def operands require a subregister", MO, MONum);
2593
2594 // Verify the consistency of tied operands.
2595 if (MO->isTied()) {
2596 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2597 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2598 if (!OtherMO.isReg())
2599 report("Must be tied to a register", MO, MONum);
2600 if (!OtherMO.isTied())
2601 report("Missing tie flags on tied operand", MO, MONum);
2602 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2603 report("Inconsistent tie links", MO, MONum);
2604 if (MONum < MCID.getNumDefs()) {
2605 if (OtherIdx < MCID.getNumOperands()) {
2606 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2607 report("Explicit def tied to explicit use without tie constraint",
2608 MO, MONum);
2609 } else {
2610 if (!OtherMO.isImplicit())
2611 report("Explicit def should be tied to implicit use", MO, MONum);
2612 }
2613 }
2614 }
2615
2616 // Verify two-address constraints after the twoaddressinstruction pass.
2617 // Both twoaddressinstruction pass and phi-node-elimination pass call
2618 // MRI->leaveSSA() to set MF as not IsSSA, we should do the verification
2619 // after twoaddressinstruction pass not after phi-node-elimination pass. So
2620 // we shouldn't use the IsSSA as the condition, we should based on
2621 // TiedOpsRewritten property to verify two-address constraints, this
2622 // property will be set in twoaddressinstruction pass.
2623 unsigned DefIdx;
2624 if (MF->getProperties().hasProperty(
2626 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2627 Reg != MI->getOperand(DefIdx).getReg())
2628 report("Two-address instruction operands must be identical", MO, MONum);
2629
2630 // Check register classes.
2631 unsigned SubIdx = MO->getSubReg();
2632
2633 if (Reg.isPhysical()) {
2634 if (SubIdx) {
2635 report("Illegal subregister index for physical register", MO, MONum);
2636 return;
2637 }
2638 if (MONum < MCID.getNumOperands()) {
2639 if (const TargetRegisterClass *DRC =
2640 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2641 if (!DRC->contains(Reg)) {
2642 report("Illegal physical register for instruction", MO, MONum);
2643 OS << printReg(Reg, TRI) << " is not a "
2644 << TRI->getRegClassName(DRC) << " register.\n";
2645 }
2646 }
2647 }
2648 if (MO->isRenamable()) {
2649 if (MRI->isReserved(Reg)) {
2650 report("isRenamable set on reserved register", MO, MONum);
2651 return;
2652 }
2653 }
2654 } else {
2655 // Virtual register.
2656 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2657 if (!RC) {
2658 // This is a generic virtual register.
2659
2660 // Do not allow undef uses for generic virtual registers. This ensures
2661 // getVRegDef can never fail and return null on a generic register.
2662 //
2663 // FIXME: This restriction should probably be broadened to all SSA
2664 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2665 // run on the SSA function just before phi elimination.
2666 if (MO->isUndef())
2667 report("Generic virtual register use cannot be undef", MO, MONum);
2668
2669 // Debug value instruction is permitted to use undefined vregs.
2670 // This is a performance measure to skip the overhead of immediately
2671 // pruning unused debug operands. The final undef substitution occurs
2672 // when debug values are allocated in LDVImpl::handleDebugValue, so
2673 // these verifications always apply after this pass.
2674 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2675 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2676 // If we're post-Select, we can't have gvregs anymore.
2677 if (isFunctionSelected) {
2678 report("Generic virtual register invalid in a Selected function",
2679 MO, MONum);
2680 return;
2681 }
2682
2683 // The gvreg must have a type and it must not have a SubIdx.
2684 LLT Ty = MRI->getType(Reg);
2685 if (!Ty.isValid()) {
2686 report("Generic virtual register must have a valid type", MO,
2687 MONum);
2688 return;
2689 }
2690
2691 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2692 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2693
2694 // If we're post-RegBankSelect, the gvreg must have a bank.
2695 if (!RegBank && isFunctionRegBankSelected) {
2696 report("Generic virtual register must have a bank in a "
2697 "RegBankSelected function",
2698 MO, MONum);
2699 return;
2700 }
2701
2702 // Make sure the register fits into its register bank if any.
2703 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2704 RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2705 report("Register bank is too small for virtual register", MO,
2706 MONum);
2707 OS << "Register bank " << RegBank->getName() << " too small("
2708 << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2709 << Ty.getSizeInBits() << "-bits\n";
2710 return;
2711 }
2712 }
2713
2714 if (SubIdx) {
2715 report("Generic virtual register does not allow subregister index", MO,
2716 MONum);
2717 return;
2718 }
2719
2720 // If this is a target specific instruction and this operand
2721 // has register class constraint, the virtual register must
2722 // comply to it.
2723 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2724 MONum < MCID.getNumOperands() &&
2725 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2726 report("Virtual register does not match instruction constraint", MO,
2727 MONum);
2728 OS << "Expect register class "
2729 << TRI->getRegClassName(TII->getRegClass(MCID, MONum, TRI, *MF))
2730 << " but got nothing\n";
2731 return;
2732 }
2733
2734 break;
2735 }
2736 if (SubIdx) {
2737 const TargetRegisterClass *SRC =
2738 TRI->getSubClassWithSubReg(RC, SubIdx);
2739 if (!SRC) {
2740 report("Invalid subregister index for virtual register", MO, MONum);
2741 OS << "Register class " << TRI->getRegClassName(RC)
2742 << " does not support subreg index " << SubIdx << '\n';
2743 return;
2744 }
2745 if (RC != SRC) {
2746 report("Invalid register class for subregister index", MO, MONum);
2747 OS << "Register class " << TRI->getRegClassName(RC)
2748 << " does not fully support subreg index " << SubIdx << '\n';
2749 return;
2750 }
2751 }
2752 if (MONum < MCID.getNumOperands()) {
2753 if (const TargetRegisterClass *DRC =
2754 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2755 if (SubIdx) {
2756 const TargetRegisterClass *SuperRC =
2757 TRI->getLargestLegalSuperClass(RC, *MF);
2758 if (!SuperRC) {
2759 report("No largest legal super class exists.", MO, MONum);
2760 return;
2761 }
2762 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2763 if (!DRC) {
2764 report("No matching super-reg register class.", MO, MONum);
2765 return;
2766 }
2767 }
2768 if (!RC->hasSuperClassEq(DRC)) {
2769 report("Illegal virtual register for instruction", MO, MONum);
2770 OS << "Expected a " << TRI->getRegClassName(DRC)
2771 << " register, but got a " << TRI->getRegClassName(RC)
2772 << " register\n";
2773 }
2774 }
2775 }
2776 }
2777 break;
2778 }
2779
2781 regMasks.push_back(MO->getRegMask());
2782 break;
2783
2785 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2786 report("PHI operand is not in the CFG", MO, MONum);
2787 break;
2788
2790 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2791 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2792 int FI = MO->getIndex();
2793 LiveInterval &LI = LiveStks->getInterval(FI);
2794 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2795
2796 bool stores = MI->mayStore();
2797 bool loads = MI->mayLoad();
2798 // For a memory-to-memory move, we need to check if the frame
2799 // index is used for storing or loading, by inspecting the
2800 // memory operands.
2801 if (stores && loads) {
2802 for (auto *MMO : MI->memoperands()) {
2803 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2804 if (PSV == nullptr) continue;
2806 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2807 if (Value == nullptr) continue;
2808 if (Value->getFrameIndex() != FI) continue;
2809
2810 if (MMO->isStore())
2811 loads = false;
2812 else
2813 stores = false;
2814 break;
2815 }
2816 if (loads == stores)
2817 report("Missing fixed stack memoperand.", MI);
2818 }
2819 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2820 report("Instruction loads from dead spill slot", MO, MONum);
2821 OS << "Live stack: " << LI << '\n';
2822 }
2823 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2824 report("Instruction stores to dead spill slot", MO, MONum);
2825 OS << "Live stack: " << LI << '\n';
2826 }
2827 }
2828 break;
2829
2831 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2832 report("CFI instruction has invalid index", MO, MONum);
2833 break;
2834
2835 default:
2836 break;
2837 }
2838}
2839
2840void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2841 unsigned MONum, SlotIndex UseIdx,
2842 const LiveRange &LR,
2843 VirtRegOrUnit VRegOrUnit,
2844 LaneBitmask LaneMask) {
2845 const MachineInstr *MI = MO->getParent();
2846
2847 if (!LR.verify()) {
2848 report("invalid live range", MO, MONum);
2849 report_context_liverange(LR);
2850 report_context_vreg_regunit(VRegOrUnit);
2851 report_context(UseIdx);
2852 return;
2853 }
2854
2855 LiveQueryResult LRQ = LR.Query(UseIdx);
2856 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2857 // Check if we have a segment at the use, note however that we only need one
2858 // live subregister range, the others may be dead.
2859 if (!HasValue && LaneMask.none()) {
2860 report("No live segment at use", MO, MONum);
2861 report_context_liverange(LR);
2862 report_context_vreg_regunit(VRegOrUnit);
2863 report_context(UseIdx);
2864 }
2865 if (MO->isKill() && !LRQ.isKill()) {
2866 report("Live range continues after kill flag", MO, MONum);
2867 report_context_liverange(LR);
2868 report_context_vreg_regunit(VRegOrUnit);
2869 if (LaneMask.any())
2870 report_context_lanemask(LaneMask);
2871 report_context(UseIdx);
2872 }
2873}
2874
/// Verify that live range \p LR is consistent with the def of \p VRegOrUnit
/// by operand \p MO at \p DefIdx. When \p SubRangeCheck is set, \p LR is a
/// subregister live range restricted to \p LaneMask.
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         VirtRegOrUnit VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  // Report a structurally invalid range but keep going: the checks below may
  // produce more specific diagnostics.
  if (!LR.verify()) {
    report("invalid live range", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }

  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO' def slot. E.g. when we check here "normal"
    // subreg MO but there is other EC subreg MO in the same instruction so the
    // whole reg has EC def slot and differs from the currently checked MO' def
    // slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // Check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter()
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    // No value number at the def slot: this operand defines something the
    // live range does not cover.
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtualReg() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
2939
/// Verify liveness-related properties of register operand \p MO (operand
/// number \p MONum): kill/dead flags, LiveVariables and LiveIntervals
/// agreement, SSA single-def, and tracking of the regsLive/regsKilled sets.
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  // Look up the live interval for a virtual register, if LiveIntervals is
  // available. LI stays null otherwise.
  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      // A subregister access (other than an undef read) on a tracked vreg
      // requires subranges on a non-empty interval.
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
        UseIdx = LiveInts->getMBBEndIdx(
            MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, VirtRegOrUnit(Unit));
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, VirtRegOrUnit(Reg));

        if (LI->hasSubRanges() && !MO->isDef()) {
          // Determine which lanes this operand reads, then require that at
          // least one of them has a live subrange at the use.
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, VirtRegOrUnit(Reg),
                               SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register has to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {

          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead, if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            // MOP covers Reg if every regunit of Reg is also a regunit of
            // MOP's register.
            if (MOP.getReg() != Reg &&
                all_of(TRI->regunits(Reg), [&](const MCRegUnit RegUnit) {
                  return llvm::is_contained(TRI->regunits(MOP.getReg()),
                                            RegUnit);
                }))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      // Early-clobber defs begin at the use slot of the instruction.
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, VirtRegOrUnit(Reg));

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, VirtRegOrUnit(Reg), true,
                               SR.LaneMask);
          }
        }
      }
    }
  }
}
3114
3115// This function gets called after visiting all instructions in a bundle. The
3116// argument points to the bundle header.
3117// Normal stand-alone instructions are also considered 'bundles', and this
3118// function is called for all of them.
3119void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
3120 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
3121 set_union(MInfo.regsKilled, regsKilled);
3122 set_subtract(regsLive, regsKilled); regsKilled.clear();
3123 // Kill any masked registers.
3124 while (!regMasks.empty()) {
3125 const uint32_t *Mask = regMasks.pop_back_val();
3126 for (Register Reg : regsLive)
3127 if (Reg.isPhysical() &&
3128 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
3129 regsDead.push_back(Reg);
3130 }
3131 set_subtract(regsLive, regsDead); regsDead.clear();
3132 set_union(regsLive, regsDefined); regsDefined.clear();
3133}
3134
3135void
3136MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
3137 MBBInfoMap[MBB].regsLiveOut = regsLive;
3138 regsLive.clear();
3139
3140 if (Indexes) {
3141 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
3142 if (!(stop > lastIndex)) {
3143 report("Block ends before last instruction index", MBB);
3144 OS << "Block ends at " << stop << " last instruction was at " << lastIndex
3145 << '\n';
3146 }
3147 lastIndex = stop;
3148 }
3149}
3150
3151namespace {
3152// This implements a set of registers that serves as a filter: can filter other
3153// sets by passing through elements not in the filter and blocking those that
3154// are. Any filter implicitly includes the full set of physical registers upon
3155// creation, thus filtering them all out. The filter itself as a set only grows,
3156// and needs to be as efficient as possible.
3157struct VRegFilter {
3158 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
3159 // no duplicates. Both virtual and physical registers are fine.
3160 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
3161 SmallVector<Register, 0> VRegsBuffer;
3162 filterAndAdd(FromRegSet, VRegsBuffer);
3163 }
3164 // Filter \p FromRegSet through the filter and append passed elements into \p
3165 // ToVRegs. All elements appended are then added to the filter itself.
3166 // \returns true if anything changed.
3167 template <typename RegSetT>
3168 bool filterAndAdd(const RegSetT &FromRegSet,
3169 SmallVectorImpl<Register> &ToVRegs) {
3170 unsigned SparseUniverse = Sparse.size();
3171 unsigned NewSparseUniverse = SparseUniverse;
3172 unsigned NewDenseSize = Dense.size();
3173 size_t Begin = ToVRegs.size();
3174 for (Register Reg : FromRegSet) {
3175 if (!Reg.isVirtual())
3176 continue;
3177 unsigned Index = Register::virtReg2Index(Reg);
3178 if (Index < SparseUniverseMax) {
3179 if (Index < SparseUniverse && Sparse.test(Index))
3180 continue;
3181 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
3182 } else {
3183 if (Dense.count(Reg))
3184 continue;
3185 ++NewDenseSize;
3186 }
3187 ToVRegs.push_back(Reg);
3188 }
3189 size_t End = ToVRegs.size();
3190 if (Begin == End)
3191 return false;
3192 // Reserving space in sets once performs better than doing so continuously
3193 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
3194 // tuned all the way down) and double iteration (the second one is over a
3195 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
3196 Sparse.resize(NewSparseUniverse);
3197 Dense.reserve(NewDenseSize);
3198 for (unsigned I = Begin; I < End; ++I) {
3199 Register Reg = ToVRegs[I];
3200 unsigned Index = Register::virtReg2Index(Reg);
3201 if (Index < SparseUniverseMax)
3202 Sparse.set(Index);
3203 else
3204 Dense.insert(Reg);
3205 }
3206 return true;
3207 }
3208
3209private:
3210 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
3211 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound
3212 // are tracked by Dense. The only purpose of the threashold and the Dense set
3213 // is to have a reasonably growing memory usage in pathological cases (large
3214 // number of very sparse VRegFilter instances live at the same time). In
3215 // practice even in the worst-by-execution time cases having all elements
3216 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
3217 // space efficient than if tracked by Dense. The threashold is set to keep the
3218 // worst-case memory usage within 2x of figures determined empirically for
3219 // "all Dense" scenario in such worst-by-execution-time cases.
3220 BitVector Sparse;
3222};
3223
3224// Implements both a transfer function and a (binary, in-place) join operator
3225// for a dataflow over register sets with set union join and filtering transfer
3226// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
3227// Maintains out_b as its state, allowing for O(n) iteration over it at any
3228// time, where n is the size of the set (as opposed to O(U) where U is the
3229// universe). filter_b implicitly contains all physical registers at all times.
3230class FilteringVRegSet {
3231 VRegFilter Filter;
3233
3234public:
3235 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
3236 // Both virtual and physical registers are fine.
3237 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
3238 Filter.add(RS);
3239 }
3240 // Passes \p RS through the filter_b (transfer function) and adds what's left
3241 // to itself (out_b).
3242 template <typename RegSetT> bool add(const RegSetT &RS) {
3243 // Double-duty the Filter: to maintain VRegs a set (and the join operation
3244 // a set union) just add everything being added here to the Filter as well.
3245 return Filter.filterAndAdd(RS, VRegs);
3246 }
3247 using const_iterator = decltype(VRegs)::const_iterator;
3248 const_iterator begin() const { return VRegs.begin(); }
3249 const_iterator end() const { return VRegs.end(); }
3250 size_t size() const { return VRegs.size(); }
3251};
3252} // namespace
3253
3254// Calculate the largest possible vregsPassed sets. These are the registers that
3255// can pass through an MBB live, but may not be live every time. It is assumed
3256// that all vregsPassed sets are empty before the call.
3257void MachineVerifier::calcRegsPassed() {
3258 if (MF->empty())
3259 // ReversePostOrderTraversal doesn't handle empty functions.
3260 return;
3261
3262 for (const MachineBasicBlock *MB :
3264 FilteringVRegSet VRegs;
3265 BBInfo &Info = MBBInfoMap[MB];
3266 assert(Info.reachable);
3267
3268 VRegs.addToFilter(Info.regsKilled);
3269 VRegs.addToFilter(Info.regsLiveOut);
3270 for (const MachineBasicBlock *Pred : MB->predecessors()) {
3271 const BBInfo &PredInfo = MBBInfoMap[Pred];
3272 if (!PredInfo.reachable)
3273 continue;
3274
3275 VRegs.add(PredInfo.regsLiveOut);
3276 VRegs.add(PredInfo.vregsPassed);
3277 }
3278 Info.vregsPassed.reserve(VRegs.size());
3279 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
3280 }
3281}
3282
3283// Calculate the set of virtual registers that must be passed through each basic
3284// block in order to satisfy the requirements of successor blocks. This is very
3285// similar to calcRegsPassed, only backwards.
3286void MachineVerifier::calcRegsRequired() {
3287 // First push live-in regs to predecessors' vregsRequired.
3289 for (const auto &MBB : *MF) {
3290 BBInfo &MInfo = MBBInfoMap[&MBB];
3291 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3292 BBInfo &PInfo = MBBInfoMap[Pred];
3293 if (PInfo.addRequired(MInfo.vregsLiveIn))
3294 todo.insert(Pred);
3295 }
3296
3297 // Handle the PHI node.
3298 for (const MachineInstr &MI : MBB.phis()) {
3299 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
3300 // Skip those Operands which are undef regs or not regs.
3301 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
3302 continue;
3303
3304 // Get register and predecessor for one PHI edge.
3305 Register Reg = MI.getOperand(i).getReg();
3306 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
3307
3308 BBInfo &PInfo = MBBInfoMap[Pred];
3309 if (PInfo.addRequired(Reg))
3310 todo.insert(Pred);
3311 }
3312 }
3313 }
3314
3315 // Iteratively push vregsRequired to predecessors. This will converge to the
3316 // same final state regardless of DenseSet iteration order.
3317 while (!todo.empty()) {
3318 const MachineBasicBlock *MBB = *todo.begin();
3319 todo.erase(MBB);
3320 BBInfo &MInfo = MBBInfoMap[MBB];
3321 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3322 if (Pred == MBB)
3323 continue;
3324 BBInfo &SInfo = MBBInfoMap[Pred];
3325 if (SInfo.addRequired(MInfo.vregsRequired))
3326 todo.insert(Pred);
3327 }
3328 }
3329}
3330
3331// Check PHI instructions at the beginning of MBB. It is assumed that
3332// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
3333void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
3334 BBInfo &MInfo = MBBInfoMap[&MBB];
3335
3337 for (const MachineInstr &Phi : MBB) {
3338 if (!Phi.isPHI())
3339 break;
3340 seen.clear();
3341
3342 const MachineOperand &MODef = Phi.getOperand(0);
3343 if (!MODef.isReg() || !MODef.isDef()) {
3344 report("Expected first PHI operand to be a register def", &MODef, 0);
3345 continue;
3346 }
3347 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
3348 MODef.isEarlyClobber() || MODef.isDebug())
3349 report("Unexpected flag on PHI operand", &MODef, 0);
3350 Register DefReg = MODef.getReg();
3351 if (!DefReg.isVirtual())
3352 report("Expected first PHI operand to be a virtual register", &MODef, 0);
3353
3354 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
3355 const MachineOperand &MO0 = Phi.getOperand(I);
3356 if (!MO0.isReg()) {
3357 report("Expected PHI operand to be a register", &MO0, I);
3358 continue;
3359 }
3360 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
3361 MO0.isDebug() || MO0.isTied())
3362 report("Unexpected flag on PHI operand", &MO0, I);
3363
3364 const MachineOperand &MO1 = Phi.getOperand(I + 1);
3365 if (!MO1.isMBB()) {
3366 report("Expected PHI operand to be a basic block", &MO1, I + 1);
3367 continue;
3368 }
3369
3370 const MachineBasicBlock &Pre = *MO1.getMBB();
3371 if (!Pre.isSuccessor(&MBB)) {
3372 report("PHI input is not a predecessor block", &MO1, I + 1);
3373 continue;
3374 }
3375
3376 if (MInfo.reachable) {
3377 seen.insert(&Pre);
3378 BBInfo &PrInfo = MBBInfoMap[&Pre];
3379 if (!MO0.isUndef() && PrInfo.reachable &&
3380 !PrInfo.isLiveOut(MO0.getReg()))
3381 report("PHI operand is not live-out from predecessor", &MO0, I);
3382 }
3383 }
3384
3385 // Did we see all predecessors?
3386 if (MInfo.reachable) {
3387 for (MachineBasicBlock *Pred : MBB.predecessors()) {
3388 if (!seen.count(Pred)) {
3389 report("Missing PHI operand", &Phi);
3390 OS << printMBBReference(*Pred)
3391 << " is a predecessor according to the CFG.\n";
3392 }
3393 }
3394 }
3395 }
3396}
3397
3398static void
3400 std::function<void(const Twine &Message)> FailureCB,
3401 raw_ostream &OS) {
3403 CV.initialize(&OS, FailureCB, MF);
3404
3405 for (const auto &MBB : MF) {
3406 CV.visit(MBB);
3407 for (const auto &MI : MBB.instrs())
3408 CV.visit(MI);
3409 }
3410
3411 if (CV.sawTokens()) {
3412 DT.recalculate(const_cast<MachineFunction &>(MF));
3413 CV.verify(DT);
3414 }
3415}
3416
3417void MachineVerifier::visitMachineFunctionAfter() {
3418 auto FailureCB = [this](const Twine &Message) {
3419 report(Message.str().c_str(), MF);
3420 };
3421 verifyConvergenceControl(*MF, DT, FailureCB, OS);
3422
3423 calcRegsPassed();
3424
3425 for (const MachineBasicBlock &MBB : *MF)
3426 checkPHIOps(MBB);
3427
3428 // Now check liveness info if available
3429 calcRegsRequired();
3430
3431 // Check for killed virtual registers that should be live out.
3432 for (const auto &MBB : *MF) {
3433 BBInfo &MInfo = MBBInfoMap[&MBB];
3434 for (Register VReg : MInfo.vregsRequired)
3435 if (MInfo.regsKilled.count(VReg)) {
3436 report("Virtual register killed in block, but needed live out.", &MBB);
3437 OS << "Virtual register " << printReg(VReg)
3438 << " is used after the block.\n";
3439 }
3440 }
3441
3442 if (!MF->empty()) {
3443 BBInfo &MInfo = MBBInfoMap[&MF->front()];
3444 for (Register VReg : MInfo.vregsRequired) {
3445 report("Virtual register defs don't dominate all uses.", MF);
3446 report_context_vreg(VReg);
3447 }
3448 }
3449
3450 if (LiveVars)
3451 verifyLiveVariables();
3452 if (LiveInts)
3453 verifyLiveIntervals();
3454
3455 // Check live-in list of each MBB. If a register is live into MBB, check
3456 // that the register is in regsLiveOut of each predecessor block. Since
3457 // this must come from a definition in the predecesssor or its live-in
3458 // list, this will catch a live-through case where the predecessor does not
3459 // have the register in its live-in list. This currently only checks
3460 // registers that have no aliases, are not allocatable and are not
3461 // reserved, which could mean a condition code register for instance.
3462 if (MRI->tracksLiveness())
3463 for (const auto &MBB : *MF)
3465 MCRegister LiveInReg = P.PhysReg;
3466 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
3467 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
3468 continue;
3469 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
3470 BBInfo &PInfo = MBBInfoMap[Pred];
3471 if (!PInfo.regsLiveOut.count(LiveInReg)) {
3472 report("Live in register not found to be live out from predecessor.",
3473 &MBB);
3474 OS << TRI->getName(LiveInReg) << " not found to be live out from "
3475 << printMBBReference(*Pred) << '\n';
3476 }
3477 }
3478 }
3479
3480 for (auto CSInfo : MF->getCallSitesInfo())
3481 if (!CSInfo.first->isCall())
3482 report("Call site info referencing instruction that is not call", MF);
3483
3484 // If there's debug-info, check that we don't have any duplicate value
3485 // tracking numbers.
3486 if (MF->getFunction().getSubprogram()) {
3487 DenseSet<unsigned> SeenNumbers;
3488 for (const auto &MBB : *MF) {
3489 for (const auto &MI : MBB) {
3490 if (auto Num = MI.peekDebugInstrNum()) {
3491 auto Result = SeenNumbers.insert((unsigned)Num);
3492 if (!Result.second)
3493 report("Instruction has a duplicated value tracking number", &MI);
3494 }
3495 }
3496 }
3497 }
3498}
3499
3500void MachineVerifier::verifyLiveVariables() {
3501 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
3502 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3504 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
3505 for (const auto &MBB : *MF) {
3506 BBInfo &MInfo = MBBInfoMap[&MBB];
3507
3508 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
3509 if (MInfo.vregsRequired.count(Reg)) {
3510 if (!VI.AliveBlocks.test(MBB.getNumber())) {
3511 report("LiveVariables: Block missing from AliveBlocks", &MBB);
3512 OS << "Virtual register " << printReg(Reg)
3513 << " must be live through the block.\n";
3514 }
3515 } else {
3516 if (VI.AliveBlocks.test(MBB.getNumber())) {
3517 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
3518 OS << "Virtual register " << printReg(Reg)
3519 << " is not needed live through the block.\n";
3520 }
3521 }
3522 }
3523 }
3524}
3525
3526void MachineVerifier::verifyLiveIntervals() {
3527 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
3528 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
3530
3531 // Spilling and splitting may leave unused registers around. Skip them.
3532 if (MRI->reg_nodbg_empty(Reg))
3533 continue;
3534
3535 if (!LiveInts->hasInterval(Reg)) {
3536 report("Missing live interval for virtual register", MF);
3537 OS << printReg(Reg, TRI) << " still has defs or uses\n";
3538 continue;
3539 }
3540
3541 const LiveInterval &LI = LiveInts->getInterval(Reg);
3542 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
3543 verifyLiveInterval(LI);
3544 }
3545
3546 // Verify all the cached regunit intervals.
3547 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
3548 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
3549 verifyLiveRange(*LR, VirtRegOrUnit(i));
3550}
3551
/// Verify a single value number \p VNI of live range \p LR: its def index
/// must map to a block (and, for non-PHI values, an instruction that actually
/// defines \p VRegOrUnit), and its slot kind must match how it is defined.
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI,
                                           VirtRegOrUnit VRegOrUnit,
                                           LaneBitmask LaneMask) {
  // Unused value numbers carry no constraints.
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, VRegOrUnit, LaneMask);
    report_context(*VNI);
    return;
  }

  // The value live at the def slot must be this very value number.
  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, VRegOrUnit, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, VRegOrUnit, LaneMask);
    report_context(*VNI);
    return;
  }

  // PHI-defined values must be defined exactly at their block's start index.
  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, VRegOrUnit, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, VRegOrUnit, LaneMask);
    report_context(*VNI);
    return;
  }

  // Scan the defining instruction's operands for a def of this register (or
  // regunit), optionally restricted to the lanes in LaneMask.
  bool hasDef = false;
  bool isEarlyClobber = false;
  for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
    if (!MOI->isReg() || !MOI->isDef())
      continue;
    if (VRegOrUnit.isVirtualReg()) {
      if (MOI->getReg() != VRegOrUnit.asVirtualReg())
        continue;
    } else {
      if (!MOI->getReg().isPhysical() ||
          !TRI->hasRegUnit(MOI->getReg(), VRegOrUnit.asMCRegUnit()))
        continue;
    }
    if (LaneMask.any() &&
        (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
      continue;
    hasDef = true;
    if (MOI->isEarlyClobber())
      isEarlyClobber = true;
  }

  if (!hasDef) {
    report("Defining instruction does not modify register", MI);
    report_context(LR, VRegOrUnit, LaneMask);
    report_context(*VNI);
  }

  // Early clobber defs begin at USE slots, but other defs must begin at
  // DEF slots.
  if (isEarlyClobber) {
    if (!VNI->def.isEarlyClobber()) {
      report("Early clobber def must be at an early-clobber slot", MBB);
      report_context(LR, VRegOrUnit, LaneMask);
      report_context(*VNI);
    }
  } else if (!VNI->def.isRegister()) {
    report("Non-PHI, non-early clobber def must be at a register slot", MBB);
    report_context(LR, VRegOrUnit, LaneMask);
    report_context(*VNI);
  }
}
3642
3643void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3645 VirtRegOrUnit VRegOrUnit,
3646 LaneBitmask LaneMask) {
3647 const LiveRange::Segment &S = *I;
3648 const VNInfo *VNI = S.valno;
3649 assert(VNI && "Live segment has no valno");
3650
3651 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3652 report("Foreign valno in live segment", MF);
3653 report_context(LR, VRegOrUnit, LaneMask);
3654 report_context(S);
3655 report_context(*VNI);
3656 }
3657
3658 if (VNI->isUnused()) {
3659 report("Live segment valno is marked unused", MF);
3660 report_context(LR, VRegOrUnit, LaneMask);
3661 report_context(S);
3662 }
3663
3664 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3665 if (!MBB) {
3666 report("Bad start of live segment, no basic block", MF);
3667 report_context(LR, VRegOrUnit, LaneMask);
3668 report_context(S);
3669 return;
3670 }
3671 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3672 if (S.start != MBBStartIdx && S.start != VNI->def) {
3673 report("Live segment must begin at MBB entry or valno def", MBB);
3674 report_context(LR, VRegOrUnit, LaneMask);
3675 report_context(S);
3676 }
3677
3678 const MachineBasicBlock *EndMBB =
3679 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3680 if (!EndMBB) {
3681 report("Bad end of live segment, no basic block", MF);
3682 report_context(LR, VRegOrUnit, LaneMask);
3683 report_context(S);
3684 return;
3685 }
3686
3687 // Checks for non-live-out segments.
3688 if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3689 // RegUnit intervals are allowed dead phis.
3690 if (!VRegOrUnit.isVirtualReg() && VNI->isPHIDef() && S.start == VNI->def &&
3691 S.end == VNI->def.getDeadSlot())
3692 return;
3693
3694 // The live segment is ending inside EndMBB
3695 const MachineInstr *MI =
3696 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3697 if (!MI) {
3698 report("Live segment doesn't end at a valid instruction", EndMBB);
3699 report_context(LR, VRegOrUnit, LaneMask);
3700 report_context(S);
3701 return;
3702 }
3703
3704 // The block slot must refer to a basic block boundary.
3705 if (S.end.isBlock()) {
3706 report("Live segment ends at B slot of an instruction", EndMBB);
3707 report_context(LR, VRegOrUnit, LaneMask);
3708 report_context(S);
3709 }
3710
3711 if (S.end.isDead()) {
3712 // Segment ends on the dead slot.
3713 // That means there must be a dead def.
3714 if (!SlotIndex::isSameInstr(S.start, S.end)) {
3715 report("Live segment ending at dead slot spans instructions", EndMBB);
3716 report_context(LR, VRegOrUnit, LaneMask);
3717 report_context(S);
3718 }
3719 }
3720
3721 // After tied operands are rewritten, a live segment can only end at an
3722 // early-clobber slot if it is being redefined by an early-clobber def.
3723 // TODO: Before tied operands are rewritten, a live segment can only end at
3724 // an early-clobber slot if the last use is tied to an early-clobber def.
3725 if (MF->getProperties().hasProperty(
3727 S.end.isEarlyClobber()) {
3728 if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3729 report("Live segment ending at early clobber slot must be "
3730 "redefined by an EC def in the same instruction",
3731 EndMBB);
3732 report_context(LR, VRegOrUnit, LaneMask);
3733 report_context(S);
3734 }
3735 }
3736
3737 // The following checks only apply to virtual registers. Physreg liveness
3738 // is too weird to check.
3739 if (VRegOrUnit.isVirtualReg()) {
3740 // A live segment can end with either a redefinition, a kill flag on a
3741 // use, or a dead flag on a def.
3742 bool hasRead = false;
3743 bool hasSubRegDef = false;
3744 bool hasDeadDef = false;
3745 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3746 if (!MOI->isReg() || MOI->getReg() != VRegOrUnit.asVirtualReg())
3747 continue;
3748 unsigned Sub = MOI->getSubReg();
3749 LaneBitmask SLM =
3750 Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3751 if (MOI->isDef()) {
3752 if (Sub != 0) {
3753 hasSubRegDef = true;
3754 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3755 // mask for subregister defs. Read-undef defs will be handled by
3756 // readsReg below.
3757 SLM = ~SLM;
3758 }
3759 if (MOI->isDead())
3760 hasDeadDef = true;
3761 }
3762 if (LaneMask.any() && (LaneMask & SLM).none())
3763 continue;
3764 if (MOI->readsReg())
3765 hasRead = true;
3766 }
3767 if (S.end.isDead()) {
3768 // Make sure that the corresponding machine operand for a "dead" live
3769 // range has the dead flag. We cannot perform this check for subregister
3770 // liveranges as partially dead values are allowed.
3771 if (LaneMask.none() && !hasDeadDef) {
3772 report(
3773 "Instruction ending live segment on dead slot has no dead flag",
3774 MI);
3775 report_context(LR, VRegOrUnit, LaneMask);
3776 report_context(S);
3777 }
3778 } else {
3779 if (!hasRead) {
3780 // When tracking subregister liveness, the main range must start new
3781 // values on partial register writes, even if there is no read.
3782 if (!MRI->shouldTrackSubRegLiveness(VRegOrUnit.asVirtualReg()) ||
3783 LaneMask.any() || !hasSubRegDef) {
3784 report("Instruction ending live segment doesn't read the register",
3785 MI);
3786 report_context(LR, VRegOrUnit, LaneMask);
3787 report_context(S);
3788 }
3789 }
3790 }
3791 }
3792 }
3793
3794 // Now check all the basic blocks in this live segment.
3796 // Is this live segment the beginning of a non-PHIDef VN?
3797 if (S.start == VNI->def && !VNI->isPHIDef()) {
3798 // Not live-in to any blocks.
3799 if (MBB == EndMBB)
3800 return;
3801 // Skip this block.
3802 ++MFI;
3803 }
3804
3806 if (LaneMask.any()) {
3807 LiveInterval &OwnerLI = LiveInts->getInterval(VRegOrUnit.asVirtualReg());
3808 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3809 }
3810
3811 while (true) {
3812 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3813 // We don't know how to track physregs into a landing pad.
3814 if (!VRegOrUnit.isVirtualReg() && MFI->isEHPad()) {
3815 if (&*MFI == EndMBB)
3816 break;
3817 ++MFI;
3818 continue;
3819 }
3820
3821 // Is VNI a PHI-def in the current block?
3822 bool IsPHI = VNI->isPHIDef() &&
3823 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3824
3825 // Check that VNI is live-out of all predecessors.
3826 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3827 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3828 // Predecessor of landing pad live-out on last call.
3829 if (MFI->isEHPad()) {
3830 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3831 if (MI.isCall()) {
3832 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3833 break;
3834 }
3835 }
3836 }
3837 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3838
3839 // All predecessors must have a live-out value. However for a phi
3840 // instruction with subregister intervals
3841 // only one of the subregisters (not necessarily the current one) needs to
3842 // be defined.
3843 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3844 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3845 continue;
3846 report("Register not marked live out of predecessor", Pred);
3847 report_context(LR, VRegOrUnit, LaneMask);
3848 report_context(*VNI);
3849 OS << " live into " << printMBBReference(*MFI) << '@'
3850 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd
3851 << '\n';
3852 continue;
3853 }
3854
3855 // Only PHI-defs can take different predecessor values.
3856 if (!IsPHI && PVNI != VNI) {
3857 report("Different value live out of predecessor", Pred);
3858 report_context(LR, VRegOrUnit, LaneMask);
3859 OS << "Valno #" << PVNI->id << " live out of "
3860 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" << VNI->id
3861 << " live into " << printMBBReference(*MFI) << '@'
3862 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3863 }
3864 }
3865 if (&*MFI == EndMBB)
3866 break;
3867 ++MFI;
3868 }
3869}
3870
3871void MachineVerifier::verifyLiveRange(const LiveRange &LR,
3872 VirtRegOrUnit VRegOrUnit,
3873 LaneBitmask LaneMask) {
3874 for (const VNInfo *VNI : LR.valnos)
3875 verifyLiveRangeValue(LR, VNI, VRegOrUnit, LaneMask);
3876
3877 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3878 verifyLiveRangeSegment(LR, I, VRegOrUnit, LaneMask);
3879}
3880
3881void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3882 Register Reg = LI.reg();
3883 assert(Reg.isVirtual());
3884 verifyLiveRange(LI, VirtRegOrUnit(Reg));
3885
3886 if (LI.hasSubRanges()) {
3888 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3889 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3890 if ((Mask & SR.LaneMask).any()) {
3891 report("Lane masks of sub ranges overlap in live interval", MF);
3892 report_context(LI);
3893 }
3894 if ((SR.LaneMask & ~MaxMask).any()) {
3895 report("Subrange lanemask is invalid", MF);
3896 report_context(LI);
3897 }
3898 if (SR.empty()) {
3899 report("Subrange must not be empty", MF);
3900 report_context(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3901 }
3902 Mask |= SR.LaneMask;
3903 verifyLiveRange(SR, VirtRegOrUnit(LI.reg()), SR.LaneMask);
3904 if (!LI.covers(SR)) {
3905 report("A Subrange is not covered by the main range", MF);
3906 report_context(LI);
3907 }
3908 }
3909 }
3910
3911 // Check the LI only has one connected component.
3912 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3913 unsigned NumComp = ConEQ.Classify(LI);
3914 if (NumComp > 1) {
3915 report("Multiple connected components in live interval", MF);
3916 report_context(LI);
3917 for (unsigned comp = 0; comp != NumComp; ++comp) {
3918 OS << comp << ": valnos";
3919 for (const VNInfo *I : LI.valnos)
3920 if (comp == ConEQ.getEqClass(I))
3921 OS << ' ' << I->id;
3922 OS << '\n';
3923 }
3924 }
3925}
3926
3927namespace {
3928
3929 // FrameSetup and FrameDestroy can have zero adjustment, so using a single
3930 // integer, we can't tell whether it is a FrameSetup or FrameDestroy if the
3931 // value is zero.
3932 // We use a bool plus an integer to capture the stack state.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
      : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
        ExitIsSetup(ExitSetup) {}

  /// Stack adjustment on block entry; can be negative, which means we are
  /// setting up a frame.
  int EntryValue = 0;
  /// Stack adjustment on block exit.
  int ExitValue = 0;
  /// Whether a FrameSetup region is open on entry. The boolean is needed
  /// because a zero adjustment alone cannot distinguish FrameSetup from
  /// FrameDestroy.
  bool EntryIsSetup = false;
  /// Whether a FrameSetup region is open on exit.
  bool ExitIsSetup = false;
};
3945
3946} // end anonymous namespace
3947
3948/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3949/// by a FrameDestroy <n>, stack adjustments are identical on all
3950/// CFG edges to a merge point, and frame is destroyed at end of a return block.
3951void MachineVerifier::verifyStackFrame() {
3952 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3953 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3954 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3955 return;
3956
3958 SPState.resize(MF->getNumBlockIDs());
3960
3961 // Visit the MBBs in DFS order.
3962 for (df_ext_iterator<const MachineFunction *,
3964 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3965 DFI != DFE; ++DFI) {
3966 const MachineBasicBlock *MBB = *DFI;
3967
3968 StackStateOfBB BBState;
3969 // Check the exit state of the DFS stack predecessor.
3970 if (DFI.getPathLength() >= 2) {
3971 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3972 assert(Reachable.count(StackPred) &&
3973 "DFS stack predecessor is already visited.\n");
3974 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3975 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3976 BBState.ExitValue = BBState.EntryValue;
3977 BBState.ExitIsSetup = BBState.EntryIsSetup;
3978 }
3979
3980 if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
3981 report("Call frame size on entry does not match value computed from "
3982 "predecessor",
3983 MBB);
3984 OS << "Call frame size on entry " << MBB->getCallFrameSize()
3985 << " does not match value computed from predecessor "
3986 << -BBState.EntryValue << '\n';
3987 }
3988
3989 // Update stack state by checking contents of MBB.
3990 for (const auto &I : *MBB) {
3991 if (I.getOpcode() == FrameSetupOpcode) {
3992 if (BBState.ExitIsSetup)
3993 report("FrameSetup is after another FrameSetup", &I);
3994 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
3995 report("AdjustsStack not set in presence of a frame pseudo "
3996 "instruction.", &I);
3997 BBState.ExitValue -= TII->getFrameTotalSize(I);
3998 BBState.ExitIsSetup = true;
3999 }
4000
4001 if (I.getOpcode() == FrameDestroyOpcode) {
4002 int Size = TII->getFrameTotalSize(I);
4003 if (!BBState.ExitIsSetup)
4004 report("FrameDestroy is not after a FrameSetup", &I);
4005 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
4006 BBState.ExitValue;
4007 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
4008 report("FrameDestroy <n> is after FrameSetup <m>", &I);
4009 OS << "FrameDestroy <" << Size << "> is after FrameSetup <"
4010 << AbsSPAdj << ">.\n";
4011 }
4012 if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
4013 report("AdjustsStack not set in presence of a frame pseudo "
4014 "instruction.", &I);
4015 BBState.ExitValue += Size;
4016 BBState.ExitIsSetup = false;
4017 }
4018 }
4019 SPState[MBB->getNumber()] = BBState;
4020
4021 // Make sure the exit state of any predecessor is consistent with the entry
4022 // state.
4023 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
4024 if (Reachable.count(Pred) &&
4025 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
4026 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
4027 report("The exit stack state of a predecessor is inconsistent.", MBB);
4028 OS << "Predecessor " << printMBBReference(*Pred) << " has exit state ("
4029 << SPState[Pred->getNumber()].ExitValue << ", "
4030 << SPState[Pred->getNumber()].ExitIsSetup << "), while "
4031 << printMBBReference(*MBB) << " has entry state ("
4032 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
4033 }
4034 }
4035
4036 // Make sure the entry state of any successor is consistent with the exit
4037 // state.
4038 for (const MachineBasicBlock *Succ : MBB->successors()) {
4039 if (Reachable.count(Succ) &&
4040 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
4041 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
4042 report("The entry stack state of a successor is inconsistent.", MBB);
4043 OS << "Successor " << printMBBReference(*Succ) << " has entry state ("
4044 << SPState[Succ->getNumber()].EntryValue << ", "
4045 << SPState[Succ->getNumber()].EntryIsSetup << "), while "
4046 << printMBBReference(*MBB) << " has exit state ("
4047 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
4048 }
4049 }
4050
4051 // Make sure a basic block with return ends with zero stack adjustment.
4052 if (!MBB->empty() && MBB->back().isReturn()) {
4053 if (BBState.ExitIsSetup)
4054 report("A return block ends with a FrameSetup.", MBB);
4055 if (BBState.ExitValue)
4056 report("A return block ends with a nonzero stack adjustment.", MBB);
4057 }
4058 }
4059}
4060
4061void MachineVerifier::verifyStackProtector() {
4062 const MachineFrameInfo &MFI = MF->getFrameInfo();
4063 if (!MFI.hasStackProtectorIndex())
4064 return;
4065 // Only applicable when the offsets of frame objects have been determined,
4066 // which is indicated by a non-zero stack size.
4067 if (!MFI.getStackSize())
4068 return;
4069 const TargetFrameLowering &TFI = *MF->getSubtarget().getFrameLowering();
4070 bool StackGrowsDown =
4072 unsigned FI = MFI.getStackProtectorIndex();
4073 int64_t SPStart = MFI.getObjectOffset(FI);
4074 int64_t SPEnd = SPStart + MFI.getObjectSize(FI);
4075 for (unsigned I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
4076 if (I == FI)
4077 continue;
4078 if (MFI.isDeadObjectIndex(I))
4079 continue;
4080 // FIXME: Skip non-default stack objects, as some targets may place them
4081 // above the stack protector. This is a workaround for the fact that
4082 // backends such as AArch64 may place SVE stack objects *above* the stack
4083 // protector.
4085 continue;
4086 // Skip variable-sized objects because they do not have a fixed offset.
4088 continue;
4089 // FIXME: Skip spill slots which may be allocated above the stack protector.
4090 // Ideally this would only skip callee-saved registers, but we don't have
4091 // that information here. For example, spill-slots used for scavenging are
4092 // not described in CalleeSavedInfo.
4093 if (MFI.isSpillSlotObjectIndex(I))
4094 continue;
4095 int64_t ObjStart = MFI.getObjectOffset(I);
4096 int64_t ObjEnd = ObjStart + MFI.getObjectSize(I);
4097 if (SPStart < ObjEnd && ObjStart < SPEnd) {
4098 report("Stack protector overlaps with another stack object", MF);
4099 break;
4100 }
4101 if ((StackGrowsDown && SPStart <= ObjStart) ||
4102 (!StackGrowsDown && SPStart >= ObjStart)) {
4103 report("Stack protector is not the top-most object on the stack", MF);
4104 break;
4105 }
4106 }
4107}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
aarch64 promote const
static bool isLoad(int Opcode)
static bool isStore(int Opcode)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file implements the BitVector class.
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
std::string Name
uint32_t Index
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
global merge Global merge function pass
const HexagonInstrInfo * TII
hexagon widen Hexagon Store false hexagon widen loads
hexagon widen stores
IRTranslator LLVM IR MI
A common definition of LaneBitmask for use in TableGen and CodeGen.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MIR specialization of the GenericConvergenceVerifier template.
unsigned const TargetRegisterInfo * TRI
unsigned Reg
static void verifyConvergenceControl(const MachineFunction &MF, MachineDominatorTree &DT, std::function< void(const Twine &Message)> FailureCB, raw_ostream &OS)
#define P(N)
ppc ctr loops verify
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines generic set operations that may be used on set's of different types,...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static unsigned getSize(unsigned Kind)
const fltSemantics & getSemantics() const
Definition: APFloat.h:1453
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:429
Represent the analysis usage information of a pass.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:671
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:240
bool test(unsigned Idx) const
Definition: BitVector.h:461
void clear()
clear - Removes all bits from the bitvector.
Definition: BitVector.h:335
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
size_type size() const
size - Returns the number of bits in this bitvector.
Definition: BitVector.h:159
ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a LiveInterval into equivalence cl...
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:271
const APFloat & getValueAPF() const
Definition: Constants.h:314
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
IntegerType * getIntegerType() const
Variant of the getType() method to always return an IntegerType, which reduces the amount of casting ...
Definition: Constants.h:187
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:151
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
Implements a dense probed hash-table based set.
Definition: DenseSet.h:278
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Register getReg() const
Base class for user error types.
Definition: Error.h:355
A specialized PseudoSourceValue for holding FixedStack values, which must include a frame index.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
void initialize(raw_ostream *OS, function_ref< void(const Twine &Message)> FailureCB, const FunctionT &F)
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Definition: DerivedTypes.h:74
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:181
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:264
constexpr bool isScalar() const
Definition: LowLevelType.h:146
constexpr bool isPointerVector() const
Definition: LowLevelType.h:152
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:170
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:190
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:277
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:183
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:270
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:153
constexpr LLT getScalarType() const
Definition: LowLevelType.h:205
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelType.h:200
A live range for subregisters.
Definition: LiveInterval.h:694
LiveInterval - This class represents the liveness of a register, or stack slot.
Definition: LiveInterval.h:687
Register reg() const
Definition: LiveInterval.h:718
bool hasSubRanges() const
Returns true if subregister liveness information is available.
Definition: LiveInterval.h:810
iterator_range< subrange_iterator > subranges()
Definition: LiveInterval.h:782
void computeSubRangeUndefs(SmallVectorImpl< SlotIndex > &Undefs, LaneBitmask LaneMask, const MachineRegisterInfo &MRI, const SlotIndexes &Indexes) const
For a given lane mask LaneMask, compute indexes at which the lane is marked undefined by subregister ...
void print(raw_ostream &O, const Module *=nullptr) const override
Implement the dump method.
Result of a LiveRange query.
Definition: LiveInterval.h:90
bool isDeadDef() const
Return true if this instruction has a dead def.
Definition: LiveInterval.h:117
VNInfo * valueIn() const
Return the value that is live-in to the instruction.
Definition: LiveInterval.h:105
VNInfo * valueOut() const
Return the value leaving the instruction, if any.
Definition: LiveInterval.h:123
bool isKill() const
Return true if the live-in value is killed by this instruction.
Definition: LiveInterval.h:112
static LLVM_ATTRIBUTE_UNUSED bool isJointlyDominated(const MachineBasicBlock *MBB, ArrayRef< SlotIndex > Defs, const SlotIndexes &Indexes)
A diagnostic function to check if the end of the block MBB is jointly dominated by the blocks corresp...
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
VNInfo * getValNumInfo(unsigned ValNo)
getValNumInfo - Returns pointer to the specified val#.
Definition: LiveInterval.h:317
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
bool covers(const LiveRange &Other) const
Returns true if all segments of the Other live range are completely covered by this live range.
bool empty() const
Definition: LiveInterval.h:382
LiveQueryResult Query(SlotIndex Idx) const
Query Liveness at Idx.
Definition: LiveInterval.h:542
iterator end()
Definition: LiveInterval.h:216
VNInfo * getVNInfoBefore(SlotIndex Idx) const
getVNInfoBefore - Return the VNInfo that is live up to but not necessarily including Idx,...
Definition: LiveInterval.h:429
bool verify() const
Walk the range and assert if any invariants fail to hold.
unsigned getNumValNums() const
Definition: LiveInterval.h:313
iterator begin()
Definition: LiveInterval.h:215
VNInfoList valnos
Definition: LiveInterval.h:204
VNInfo * getVNInfoAt(SlotIndex Idx) const
getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
Definition: LiveInterval.h:421
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
TypeSize getValue() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
ExceptionHandling getExceptionHandlingType() const
Definition: MCAsmInfo.h:642
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
bool isConvergent() const
Return true if this instruction is convergent.
Definition: MCInstrDesc.h:415
bool variadicOpsAreDefs() const
Return true if variadic operands of this instruction are definitions.
Definition: MCInstrDesc.h:418
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:230
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:113
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1434
bool isValid() const
isValid - Returns true until all the operands have been visited.
bool isInlineAsmBrIndirectTarget() const
Returns true if this is the indirect dest of an INLINEASM_BR.
unsigned pred_size() const
bool isEHPad() const
Returns true if the block is a landing pad.
iterator_range< livein_iterator > liveins() const
iterator_range< iterator > phis()
Returns a range that iterates over the phis in the basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isIRBlockAddressTaken() const
Test whether this block is the target of an IR BlockAddress.
unsigned succ_size() const
BasicBlock * getAddressTakenIRBlock() const
Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
unsigned getCallFrameSize() const
Return the call frame size on entry to this basic block.
iterator_range< succ_iterator > successors()
bool isSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a successor of this block.
iterator_range< pred_iterator > predecessors()
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int getStackProtectorIndex() const
Return the index for the stack protector object.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
BitVector getPristineRegs(const MachineFunction &MF) const
Return a set of physical registers that are pristine.
bool isVariableSizedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a variable sized object.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
bool verify(Pass *p=nullptr, const char *Banner=nullptr, raw_ostream *OS=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
void print(raw_ostream &OS, const SlotIndexes *=nullptr) const
print - Print out the MachineFunction in a format suitable for debugging to the specified stream.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
Definition: MachineInstr.h:71
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:577
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:948
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction is part of the terminator for a basic block.
Definition: MachineInstr.h:983
bool isBarrier(QueryType Type=AnyInBundle) const
Returns true if the specified instruction stops control flow from executing the instruction immediate...
Definition: MachineInstr.h:974
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
const MDNode * getRanges() const
Return the range tag for the memory reference.
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isImplicit() const
bool isIntrinsicID() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
ArrayRef< int > getShuffleMask() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isValidExcessOperand() const
Return true if this operand can validly be appended to an arbitrary operand list.
bool isShuffleMask() const
unsigned getCFIIndex() const
bool isRenamable() const
isRenamable - Returns true if this register may be renamed, i.e.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
bool isEarlyClobber() const
Register getReg() const
getReg - Returns the register number.
bool isInternalRead() const
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
const uint32_t * getRegMask() const
getRegMask - Returns a bit mask of registers preserved by this RegMask operand.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
@ MO_CFIIndex
MCCFIInstruction index.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_FrameIndex
Abstract Stack Frame Index.
@ MO_Register
Register operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Definition: ManagedStatic.h:83
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition: Pass.cpp:130
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
Special value supplied for machine level alias analysis.
Holds all the information related to register banks.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
This class implements the register bank concept.
Definition: RegisterBank.h:28
const char * getName() const
Get a user friendly name of this register bank.
Definition: RegisterBank.h:49
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
static unsigned virtReg2Index(Register Reg)
Convert a virtual register number to a 0-based index.
Definition: Register.h:77
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:65
static bool isSameInstr(SlotIndex A, SlotIndex B)
isSameInstr - Return true if A and B refer to the same instruction.
Definition: SlotIndexes.h:176
bool isBlock() const
isBlock - Returns true if this is a block boundary slot.
Definition: SlotIndexes.h:209
SlotIndex getDeadSlot() const
Returns the dead def kill slot for the current instruction.
Definition: SlotIndexes.h:242
bool isEarlyClobber() const
isEarlyClobber - Returns true if this is an early-clobber slot.
Definition: SlotIndexes.h:212
bool isRegister() const
isRegister - Returns true if this is a normal register use/def slot.
Definition: SlotIndexes.h:216
SlotIndex getPrevSlot() const
Returns the previous slot in the index list.
Definition: SlotIndexes.h:272
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:237
bool isDead() const
isDead - Returns true if this is a dead def kill slot.
Definition: SlotIndexes.h:219
SlotIndexes pass.
Definition: SlotIndexes.h:297
MBBIndexIterator MBBIndexBegin() const
Returns an iterator for the begin of the idx2MBBMap.
Definition: SlotIndexes.h:505
MBBIndexIterator MBBIndexEnd() const
Return an iterator for the end of the idx2MBBMap.
Definition: SlotIndexes.h:510
SmallVectorImpl< IdxMBBPair >::const_iterator MBBIndexIterator
Iterator over the idx2MBBMap (sorted pairs of slot index of basic block begin and basic block)
Definition: SlotIndexes.h:481
size_type size() const
Definition: SmallPtrSet.h:94
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition: SmallPtrSet.h:401
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
iterator begin() const
Definition: SmallPtrSet.h:472
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void resize(size_type N)
Definition: SmallVector.h:638
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
Register getReg() const
MI-level Statepoint operands.
Definition: StackMaps.h:158
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Information about stack frame layout on the target.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
bool hasSuperClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a super-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::string str() const
Return the twine contents as a std::string.
Definition: Twine.cpp:17
VNInfo - Value Number Information.
Definition: LiveInterval.h:53
bool isUnused() const
Returns true if this value is unused.
Definition: LiveInterval.h:81
unsigned id
The ID number of this value.
Definition: LiveInterval.h:58
SlotIndex def
The index of the defining instruction.
Definition: LiveInterval.h:61
bool isPHIDef() const
Returns true if this value is defined by a PHI instruction (or was, PHI instructions may have been el...
Definition: LiveInterval.h:78
LLVM Value Representation.
Definition: Value.h:74
Wrapper class representing a virtual register or register unit.
Definition: Register.h:164
constexpr bool isVirtualReg() const
Definition: Register.h:175
constexpr MCRegUnit asMCRegUnit() const
Definition: Register.h:179
constexpr Register asVirtualReg() const
Definition: Register.h:184
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:213
constexpr bool isNonZero() const
Definition: TypeSize.h:158
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:218
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:225
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:239
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:125
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_IMMEDIATE
Definition: MCInstrDesc.h:60
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:47
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
const_iterator begin(StringRef path LLVM_LIFETIME_BOUND, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
const_iterator end(StringRef path LLVM_LIFETIME_BOUND)
Get end iterator over path.
Definition: Path.cpp:235
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1697
void initializeMachineVerifierLegacyPassPass(PassRegistry &)
@ SjLj
setjmp/longjmp based exceptions
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI)
Create Printable object to print register units on a raw_ostream.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
void set_subtract(S1Ty &S1, const S2Ty &S2)
set_subtract(A, B) - Compute A := A - B
Printable PrintLaneMask(LaneBitmask LaneMask)
Create Printable object to print LaneBitmasks on a raw_ostream.
Definition: LaneBitmask.h:92
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
void verifyMachineFunction(const std::string &Banner, const MachineFunction &MF)
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
detail::ValueMatchesPoly< M > HasValue(M Matcher)
Definition: Error.h:221
df_ext_iterator< T, SetTy > df_ext_begin(const T &G, SetTy &S)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
raw_ostream & nulls()
This returns a reference to a raw_ostream which simply discards output.
bool set_union(S1Ty &S1, const S2Ty &S2)
set_union(A, B) - Compute A := A u B, return whether A changed.
Definition: SetOperations.h:43
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1873
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
df_ext_iterator< T, SetTy > df_ext_end(const T &G, SetTy &S)
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
FunctionPass * createMachineVerifierPass(const std::string &Banner)
createMachineVerifierPass - This pass verifies generated machine code instructions for correctness.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:370
static constexpr LaneBitmask getAll()
Definition: LaneBitmask.h:82
constexpr bool none() const
Definition: LaneBitmask.h:52
constexpr bool any() const
Definition: LaneBitmask.h:53
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
This represents a simple continuous liveness interval for a value.
Definition: LiveInterval.h:162
VarInfo - This represents the regions where a virtual register is live in the program.
Definition: LiveVariables.h:78
Pair of physical register and lane mask.