clang 22.0.0git
CIRGenStmt.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Stmt nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15
16#include "mlir/IR/Builders.h"
17#include "mlir/IR/Location.h"
18#include "mlir/Support/LLVM.h"
19#include "clang/AST/ExprCXX.h"
20#include "clang/AST/Stmt.h"
24
25using namespace clang;
26using namespace clang::CIRGen;
27using namespace cir;
28
29static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf,
30 const Stmt *exprResult,
31 AggValueSlot slot,
32 Address *lastValue) {
33 // We have to special case labels here. They are statements, but when put
34 // at the end of a statement expression, they yield the value of their
35 // subexpression. Handle this by walking through all labels we encounter,
36 // emitting them before we evaluate the subexpr.
37 // Similar issues arise for attributed statements.
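// Illustrative only (not from this file): in a GNU statement expression like
//   int v = ({ work(); done: 42; });
// the trailing label 'done:' is emitted first, and the subexpression 42 then
// supplies the value of the whole expression.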
38 while (!isa<Expr>(exprResult)) {
39 if (const auto *ls = dyn_cast<LabelStmt>(exprResult)) {
40 if (cgf.emitLabel(*ls->getDecl()).failed())
41 return mlir::failure();
42 exprResult = ls->getSubStmt();
43 } else if (const auto *as = dyn_cast<AttributedStmt>(exprResult)) {
44 // FIXME: Update this if we ever have attributes that affect the
45 // semantics of an expression.
46 exprResult = as->getSubStmt();
47 } else {
48 llvm_unreachable("Unknown value statement");
49 }
50 }
51
52 const Expr *e = cast<Expr>(exprResult);
53 QualType exprTy = e->getType();
54 if (cgf.hasAggregateEvaluationKind(exprTy)) {
55 cgf.emitAggExpr(e, slot);
56 } else {
57 // We can't return an RValue here because there might be cleanups at
58 // the end of the StmtExpr. Because of that, we have to emit the result
59 // here into a temporary alloca.
60 cgf.emitAnyExprToMem(e, *lastValue, Qualifiers(),
61 /*IsInit*/ false);
62 }
63
64 return mlir::success();
65}
66
67mlir::LogicalResult CIRGenFunction::emitCompoundStmtWithoutScope(
68 const CompoundStmt &s, Address *lastValue, AggValueSlot slot) {
69 mlir::LogicalResult result = mlir::success();
70 const Stmt *exprResult = s.body_back();
71 assert((!lastValue || (lastValue && exprResult)) &&
72 "If lastValue is not null then the CompoundStmt must have a "
73 "StmtExprResult");
74
75 for (const Stmt *curStmt : s.body()) {
76 const bool saveResult = lastValue && exprResult == curStmt;
77 if (saveResult) {
78 if (emitStmtWithResult(*this, exprResult, slot, lastValue).failed())
79 result = mlir::failure();
80 } else {
81 if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
82 result = mlir::failure();
83 }
84 }
85 return result;
86}
87
88mlir::LogicalResult CIRGenFunction::emitCompoundStmt(const CompoundStmt &s,
89 Address *lastValue,
90 AggValueSlot slot) {
91 // Add local scope to track new declared variables.
93 mlir::Location scopeLoc = getLoc(s.getSourceRange());
94 mlir::OpBuilder::InsertPoint scopeInsPt;
95 cir::ScopeOp::create(
96 builder, scopeLoc,
97 [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
98 scopeInsPt = b.saveInsertionPoint();
99 });
100 mlir::OpBuilder::InsertionGuard guard(builder);
101 builder.restoreInsertionPoint(scopeInsPt);
102 LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
103 return emitCompoundStmtWithoutScope(s, lastValue, slot);
104}
105
106void CIRGenFunction::emitStopPoint(const Stmt *s) {
107 assert(!cir::MissingFeatures::generateDebugInfo());
108}
109
110// Build CIR for a statement. useCurrentScope should be true if no new scopes
111// need to be created when finding a compound statement.
112mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
113 bool useCurrentScope,
114 ArrayRef<const Attr *> attrs) {
115 if (mlir::succeeded(emitSimpleStmt(s, useCurrentScope)))
116 return mlir::success();
117
118 switch (s->getStmtClass()) {
119 case Stmt::NoStmtClass:
120 case Stmt::CXXCatchStmtClass:
121 case Stmt::SEHExceptStmtClass:
122 case Stmt::SEHFinallyStmtClass:
123 case Stmt::MSDependentExistsStmtClass:
124 llvm_unreachable("invalid statement class to emit generically");
125 case Stmt::BreakStmtClass:
126 case Stmt::NullStmtClass:
127 case Stmt::CompoundStmtClass:
128 case Stmt::ContinueStmtClass:
129 case Stmt::DeclStmtClass:
130 case Stmt::ReturnStmtClass:
131 llvm_unreachable("should have emitted these statements as simple");
132
133#define STMT(Type, Base)
134#define ABSTRACT_STMT(Op)
135#define EXPR(Type, Base) case Stmt::Type##Class:
136#include "clang/AST/StmtNodes.inc"
137 {
138 assert(builder.getInsertionBlock() &&
139 "expression emission must have an insertion point");
140
141 emitIgnoredExpr(cast<Expr>(s));
142
143 // Classic codegen has a check here to see if the emitter created a new
144 // block that isn't used (comparing the incoming and outgoing insertion
145 // points) and deletes the outgoing block if it's not used. In CIR, we
146 // will handle that during the cir.canonicalize pass.
147 return mlir::success();
148 }
149 case Stmt::IfStmtClass:
150 return emitIfStmt(cast<IfStmt>(*s));
151 case Stmt::SwitchStmtClass:
152 return emitSwitchStmt(cast<SwitchStmt>(*s));
153 case Stmt::ForStmtClass:
154 return emitForStmt(cast<ForStmt>(*s));
155 case Stmt::WhileStmtClass:
156 return emitWhileStmt(cast<WhileStmt>(*s));
157 case Stmt::DoStmtClass:
158 return emitDoStmt(cast<DoStmt>(*s));
159 case Stmt::CXXTryStmtClass:
160 return emitCXXTryStmt(cast<CXXTryStmt>(*s));
161 case Stmt::CXXForRangeStmtClass:
162 return emitCXXForRangeStmt(cast<CXXForRangeStmt>(*s), attrs);
163 case Stmt::CoroutineBodyStmtClass:
164 return emitCoroutineBody(cast<CoroutineBodyStmt>(*s));
165 case Stmt::IndirectGotoStmtClass:
166 return emitIndirectGotoStmt(cast<IndirectGotoStmt>(*s));
167 case Stmt::CoreturnStmtClass:
169 case Stmt::OpenACCComputeConstructClass:
170 return emitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*s));
171 case Stmt::OpenACCLoopConstructClass:
172 return emitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*s));
173 case Stmt::OpenACCCombinedConstructClass:
174 return emitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*s));
175 case Stmt::OpenACCDataConstructClass:
176 return emitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*s));
177 case Stmt::OpenACCEnterDataConstructClass:
178 return emitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*s));
179 case Stmt::OpenACCExitDataConstructClass:
180 return emitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*s));
181 case Stmt::OpenACCHostDataConstructClass:
182 return emitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*s));
183 case Stmt::OpenACCWaitConstructClass:
184 return emitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*s));
185 case Stmt::OpenACCInitConstructClass:
186 return emitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*s));
187 case Stmt::OpenACCShutdownConstructClass:
188 return emitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*s));
189 case Stmt::OpenACCSetConstructClass:
190 return emitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*s));
191 case Stmt::OpenACCUpdateConstructClass:
192 return emitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*s));
193 case Stmt::OpenACCCacheConstructClass:
194 return emitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*s));
195 case Stmt::OpenACCAtomicConstructClass:
196 return emitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*s));
197 case Stmt::GCCAsmStmtClass:
198 case Stmt::MSAsmStmtClass:
199 return emitAsmStmt(cast<AsmStmt>(*s));
200 case Stmt::OMPScopeDirectiveClass:
202 case Stmt::OMPErrorDirectiveClass:
204 case Stmt::OMPParallelDirectiveClass:
206 case Stmt::OMPTaskwaitDirectiveClass:
208 case Stmt::OMPTaskyieldDirectiveClass:
210 case Stmt::OMPBarrierDirectiveClass:
212 case Stmt::OMPMetaDirectiveClass:
214 case Stmt::OMPCanonicalLoopClass:
216 case Stmt::OMPSimdDirectiveClass:
218 case Stmt::OMPTileDirectiveClass:
220 case Stmt::OMPUnrollDirectiveClass:
222 case Stmt::OMPFuseDirectiveClass:
224 case Stmt::OMPForDirectiveClass:
226 case Stmt::OMPForSimdDirectiveClass:
228 case Stmt::OMPSectionsDirectiveClass:
230 case Stmt::OMPSectionDirectiveClass:
232 case Stmt::OMPSingleDirectiveClass:
234 case Stmt::OMPMasterDirectiveClass:
236 case Stmt::OMPCriticalDirectiveClass:
238 case Stmt::OMPParallelForDirectiveClass:
240 case Stmt::OMPParallelForSimdDirectiveClass:
243 case Stmt::OMPParallelMasterDirectiveClass:
245 case Stmt::OMPParallelSectionsDirectiveClass:
248 case Stmt::OMPTaskDirectiveClass:
250 case Stmt::OMPTaskgroupDirectiveClass:
252 case Stmt::OMPFlushDirectiveClass:
254 case Stmt::OMPDepobjDirectiveClass:
256 case Stmt::OMPScanDirectiveClass:
258 case Stmt::OMPOrderedDirectiveClass:
260 case Stmt::OMPAtomicDirectiveClass:
262 case Stmt::OMPTargetDirectiveClass:
264 case Stmt::OMPTeamsDirectiveClass:
266 case Stmt::OMPCancellationPointDirectiveClass:
269 case Stmt::OMPCancelDirectiveClass:
271 case Stmt::OMPTargetDataDirectiveClass:
273 case Stmt::OMPTargetEnterDataDirectiveClass:
276 case Stmt::OMPTargetExitDataDirectiveClass:
278 case Stmt::OMPTargetParallelDirectiveClass:
280 case Stmt::OMPTargetParallelForDirectiveClass:
283 case Stmt::OMPTaskLoopDirectiveClass:
285 case Stmt::OMPTaskLoopSimdDirectiveClass:
287 case Stmt::OMPMaskedTaskLoopDirectiveClass:
289 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
292 case Stmt::OMPMasterTaskLoopDirectiveClass:
294 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
297 case Stmt::OMPParallelGenericLoopDirectiveClass:
300 case Stmt::OMPParallelMaskedDirectiveClass:
302 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
305 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
308 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
311 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
314 case Stmt::OMPDistributeDirectiveClass:
316 case Stmt::OMPDistributeParallelForDirectiveClass:
319 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
322 case Stmt::OMPDistributeSimdDirectiveClass:
324 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
327 case Stmt::OMPTargetParallelForSimdDirectiveClass:
330 case Stmt::OMPTargetSimdDirectiveClass:
332 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
335 case Stmt::OMPTargetUpdateDirectiveClass:
337 case Stmt::OMPTeamsDistributeDirectiveClass:
340 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
343 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
346 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
349 case Stmt::OMPTeamsGenericLoopDirectiveClass:
352 case Stmt::OMPTargetTeamsDirectiveClass:
354 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
357 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
360 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
363 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
366 case Stmt::OMPInteropDirectiveClass:
368 case Stmt::OMPDispatchDirectiveClass:
370 case Stmt::OMPGenericLoopDirectiveClass:
372 case Stmt::OMPReverseDirectiveClass:
374 case Stmt::OMPInterchangeDirectiveClass:
376 case Stmt::OMPAssumeDirectiveClass:
378 case Stmt::OMPMaskedDirectiveClass:
380 case Stmt::OMPStripeDirectiveClass:
382 case Stmt::LabelStmtClass:
383 case Stmt::AttributedStmtClass:
384 case Stmt::GotoStmtClass:
385 case Stmt::DefaultStmtClass:
386 case Stmt::CaseStmtClass:
387 case Stmt::SEHLeaveStmtClass:
388 case Stmt::SYCLKernelCallStmtClass:
389 case Stmt::CapturedStmtClass:
390 case Stmt::ObjCAtTryStmtClass:
391 case Stmt::ObjCAtThrowStmtClass:
392 case Stmt::ObjCAtSynchronizedStmtClass:
393 case Stmt::ObjCForCollectionStmtClass:
394 case Stmt::ObjCAutoreleasePoolStmtClass:
395 case Stmt::SEHTryStmtClass:
396 case Stmt::ObjCAtCatchStmtClass:
397 case Stmt::ObjCAtFinallyStmtClass:
398 case Stmt::DeferStmtClass:
399 cgm.errorNYI(s->getSourceRange(),
400 std::string("emitStmt: ") + s->getStmtClassName());
401 return mlir::failure();
402 }
403
404 llvm_unreachable("Unexpected statement class");
405}
406
407mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
408 bool useCurrentScope) {
409 switch (s->getStmtClass()) {
410 default:
411 return mlir::failure();
412 case Stmt::DeclStmtClass:
413 return emitDeclStmt(cast<DeclStmt>(*s));
414 case Stmt::CompoundStmtClass:
415 if (useCurrentScope)
416 return emitCompoundStmtWithoutScope(cast<CompoundStmt>(*s));
417 return emitCompoundStmt(cast<CompoundStmt>(*s));
418 case Stmt::GotoStmtClass:
419 return emitGotoStmt(cast<GotoStmt>(*s));
420 case Stmt::ContinueStmtClass:
421 return emitContinueStmt(cast<ContinueStmt>(*s));
422
423 // NullStmt doesn't need any handling, but we need to say we handled it.
424 case Stmt::NullStmtClass:
425 break;
426
427 case Stmt::LabelStmtClass:
428 return emitLabelStmt(cast<LabelStmt>(*s));
429 case Stmt::CaseStmtClass:
430 case Stmt::DefaultStmtClass:
431 // If we reach here, we are not handling a switch case at the top level.
432 return emitSwitchCase(cast<SwitchCase>(*s),
433 /*buildingTopLevelCase=*/false);
434 break;
435
436 case Stmt::BreakStmtClass:
437 return emitBreakStmt(cast<BreakStmt>(*s));
438 case Stmt::ReturnStmtClass:
439 return emitReturnStmt(cast<ReturnStmt>(*s));
440 }
441
442 return mlir::success();
443}
444
445mlir::LogicalResult CIRGenFunction::emitLabelStmt(const clang::LabelStmt &s) {
446
447 if (emitLabel(*s.getDecl()).failed())
448 return mlir::failure();
449
450 if (getContext().getLangOpts().EHAsynch && s.isSideEntry())
451 getCIRGenModule().errorNYI(s.getSourceRange(), "IsEHa: not implemented.");
452
453 return emitStmt(s.getSubStmt(), /*useCurrentScope*/ true);
454}
455
456// Add a terminating yield on a body region if no other terminators are used.
457static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r,
458 mlir::Location loc) {
459 if (r.empty())
460 return;
461
462 llvm::SmallVector<mlir::Block *, 4> eraseBlocks;
463 unsigned numBlocks = r.getBlocks().size();
464 for (auto &block : r.getBlocks()) {
465 // Clean up after return operations, which might create empty blocks
466 // if emitted as the last stmt.
467 if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() &&
468 block.hasNoSuccessors())
469 eraseBlocks.push_back(&block);
470
471 if (block.empty() ||
472 !block.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
473 mlir::OpBuilder::InsertionGuard guardCase(builder);
474 builder.setInsertionPointToEnd(&block);
475 builder.createYield(loc);
476 }
477 }
478
479 for (auto *b : eraseBlocks)
480 b->erase();
481}
482
483mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
484 mlir::LogicalResult res = mlir::success();
485 // The else branch of a consteval if statement is always the only branch
486 // that can be evaluated at runtime.
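// Illustrative only (not from this file):
//   if consteval { ct(); } else { rt(); }   // only rt() can run at runtime
//   if !consteval { rt(); } else { ct(); }  // only rt() can run at runtime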
487 const Stmt *constevalExecuted;
488 if (s.isConsteval()) {
489 constevalExecuted = s.isNegatedConsteval() ? s.getThen() : s.getElse();
490 if (!constevalExecuted) {
491 // No runtime code execution required
492 return res;
493 }
494 }
495
496 // C99 6.8.4.1: The first substatement is executed if the expression
497 // compares unequal to 0. The condition must be a scalar type.
498 auto ifStmtBuilder = [&]() -> mlir::LogicalResult {
499 if (s.isConsteval())
500 return emitStmt(constevalExecuted, /*useCurrentScope=*/true);
501
502 if (s.getInit())
503 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
504 return mlir::failure();
505
506 if (s.getConditionVariable())
507 emitDecl(*s.getConditionVariable());
508
509 // If the condition folds to a constant and this is an 'if constexpr',
510 // we simplify it early in CIRGen to avoid emitting the full 'if'.
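// Illustrative only (not from this file): for
//   if constexpr (sizeof(void *) == 8) { a(); } else { b(); }
// the condition folds to a constant here, so only the selected branch is
// emitted and no cir.if is generated for it.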
511 bool condConstant;
512 if (constantFoldsToBool(s.getCond(), condConstant, s.isConstexpr())) {
513 if (s.isConstexpr()) {
514 // Handle "if constexpr" explicitly here to avoid generating some
515 // ill-formed code since in CIR the "if" is no longer simplified
516 // in this lambda like in Clang but postponed to other MLIR
517 // passes.
518 if (const Stmt *executed = condConstant ? s.getThen() : s.getElse())
519 return emitStmt(executed, /*useCurrentScope=*/true);
520 // There is nothing to execute at runtime.
521 // TODO(cir): there is still an empty cir.scope generated by the caller.
522 return mlir::success();
523 }
524 }
525
528 return emitIfOnBoolExpr(s.getCond(), s.getThen(), s.getElse());
529 };
530
531 // TODO: Add a new scoped symbol table.
532 // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
533 // The if scope contains the full source range for IfStmt.
534 mlir::Location scopeLoc = getLoc(s.getSourceRange());
535 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
536 [&](mlir::OpBuilder &b, mlir::Location loc) {
537 LexicalScope lexScope{*this, scopeLoc,
538 builder.getInsertionBlock()};
539 res = ifStmtBuilder();
540 });
541
542 return res;
543}
544
545mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
546 assert(builder.getInsertionBlock() && "expected valid insertion point");
547
548 for (const Decl *i : s.decls())
549 emitDecl(*i, /*evaluateConditionDecl=*/true);
550
551 return mlir::success();
552}
553
554mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
555 mlir::Location loc = getLoc(s.getSourceRange());
556 const Expr *rv = s.getRetValue();
557
558 RunCleanupsScope cleanupScope(*this);
559 bool createNewScope = false;
560 if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
561 rv = ewc->getSubExpr();
562 createNewScope = true;
563 }
564
565 auto handleReturnVal = [&]() {
566 if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
567 s.getNRVOCandidate()->isNRVOVariable()) {
569 // Apply the named return value optimization for this return statement,
570 // which means doing nothing: the appropriate result has already been
571 // constructed into the NRVO variable.
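// Illustrative only (not from this file): in
//   Widget make() { Widget w; /* ... */ return w; }
// 'w' is constructed directly in the return slot, so this return emits no
// copy or move.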
572
573 // If there is an NRVO flag for this variable, set it to 1 to indicate
574 // that the cleanup code should not destroy the variable.
575 if (auto nrvoFlag = nrvoFlags[s.getNRVOCandidate()])
576 builder.createFlagStore(loc, true, nrvoFlag);
577 } else if (!rv) {
578 // No return expression. Do nothing.
579 } else if (rv->getType()->isVoidType()) {
580 // Make sure not to return anything, but evaluate the expression
581 // for side effects.
582 if (rv) {
583 emitAnyExpr(rv);
584 }
585 } else if (cast<FunctionDecl>(curGD.getDecl())
586 ->getReturnType()
587 ->isReferenceType()) {
588 // If this function returns a reference, take the address of the
589 // expression rather than the value.
590 RValue result = emitReferenceBindingToExpr(rv);
591 builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
592 *fnRetAlloca);
593 } else {
594 mlir::Value value = nullptr;
595 switch (getEvaluationKind(rv->getType())) {
596 case cir::TEK_Scalar:
597 value = emitScalarExpr(rv);
598 if (value) { // Change this to an assert once emitScalarExpr is complete
599 builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
600 }
601 break;
602 case cir::TEK_Complex:
603 emitComplexExprIntoLValue(rv,
604 makeAddrLValue(returnValue, rv->getType()),
605 /*isInit=*/true);
606 break;
613 break;
614 }
615 }
616 };
617
618 if (!createNewScope) {
619 handleReturnVal();
620 } else {
621 mlir::Location scopeLoc =
622 getLoc(rv ? rv->getSourceRange() : s.getSourceRange());
623 // First create cir.scope and later emit its body. Otherwise all CIRGen
624 // dispatched by `handleReturnVal()` might need to manipulate blocks and
625 // look into parents, which are all unlinked.
626 mlir::OpBuilder::InsertPoint scopeBody;
627 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
628 [&](mlir::OpBuilder &b, mlir::Location loc) {
629 scopeBody = b.saveInsertionPoint();
630 });
631 {
632 mlir::OpBuilder::InsertionGuard guard(builder);
633 builder.restoreInsertionPoint(scopeBody);
634 CIRGenFunction::LexicalScope lexScope{*this, scopeLoc,
635 builder.getInsertionBlock()};
636 handleReturnVal();
637 }
638 }
639
640 cleanupScope.forceCleanup();
641
642 // In CIR we might have returns in different scopes.
643 // FIXME(cir): cleanup code is handling actual return emission, the logic
644 // should try to match traditional codegen more closely (to the extent which
645 // is possible).
646 auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
648
649 // Insert the new block to continue codegen after branch to ret block.
650 builder.createBlock(builder.getBlock()->getParent());
651
652 return mlir::success();
653}
654
655mlir::LogicalResult CIRGenFunction::emitGotoStmt(const clang::GotoStmt &s) {
656 // FIXME: LLVM codegen emits a stop point here for debug info's sake
657 // when the insertion point is available, but doesn't do anything
658 // special when there isn't. We haven't implemented debug info support
659 // just yet; look at this again once we have it.
661
662 cir::GotoOp::create(builder, getLoc(s.getSourceRange()),
663 s.getLabel()->getName());
664
665 // A goto marks the end of a block, create a new one for codegen after
666 // emitGotoStmt can resume building in that block.
667 // Insert the new block to continue codegen after goto.
668 builder.createBlock(builder.getBlock()->getParent());
669
670 return mlir::success();
671}
672
673mlir::LogicalResult
674CIRGenFunction::emitIndirectGotoStmt(const IndirectGotoStmt &s) {
675 mlir::Value val = emitScalarExpr(s.getTarget());
676 assert(indirectGotoBlock &&
677 "the indirect goto block should already have been emitted");
678 cir::BrOp::create(builder, getLoc(s.getSourceRange()), indirectGotoBlock,
679 val);
680 builder.createBlock(builder.getBlock()->getParent());
681 return mlir::success();
682}
683
684mlir::LogicalResult
685CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &s) {
686 builder.createContinue(getLoc(s.getKwLoc()));
687
688 // Insert the new block to continue codegen after the continue statement.
689 builder.createBlock(builder.getBlock()->getParent());
690
691 return mlir::success();
692}
693
694mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
695 // Create a new block to tag with a label and add a branch from
696 // the current one to it. If the current block is empty, just attach
697 // the label to it directly.
698 mlir::Block *currBlock = builder.getBlock();
699 mlir::Block *labelBlock = currBlock;
700
701 if (!currBlock->empty() || currBlock->isEntryBlock()) {
702 {
703 mlir::OpBuilder::InsertionGuard guard(builder);
704 labelBlock = builder.createBlock(builder.getBlock()->getParent());
705 }
706 cir::BrOp::create(builder, getLoc(d.getSourceRange()), labelBlock);
707 }
708
709 builder.setInsertionPointToEnd(labelBlock);
710 cir::LabelOp label =
711 cir::LabelOp::create(builder, getLoc(d.getSourceRange()), d.getName());
712 builder.setInsertionPointToEnd(labelBlock);
713 auto func = cast<cir::FuncOp>(curFn);
714 cgm.mapBlockAddress(cir::BlockAddrInfoAttr::get(builder.getContext(),
715 func.getSymNameAttr(),
716 label.getLabelAttr()),
717 label);
718 // FIXME: emit debug info for labels, incrementProfileCounter
722 return mlir::success();
723}
724
725mlir::LogicalResult CIRGenFunction::emitBreakStmt(const clang::BreakStmt &s) {
726 builder.createBreak(getLoc(s.getKwLoc()));
727
728 // Insert the new block to continue codegen after the break statement.
729 builder.createBlock(builder.getBlock()->getParent());
730
731 return mlir::success();
732}
733
734template <typename T>
735mlir::LogicalResult
736CIRGenFunction::emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
737 mlir::ArrayAttr value, CaseOpKind kind,
738 bool buildingTopLevelCase) {
739
741 "only case or default stmt go here");
742
743 mlir::LogicalResult result = mlir::success();
744
745 mlir::Location loc = getLoc(stmt->getBeginLoc());
746
747 enum class SubStmtKind { Case, Default, Other };
748 SubStmtKind subStmtKind = SubStmtKind::Other;
749 const Stmt *sub = stmt->getSubStmt();
750
751 mlir::OpBuilder::InsertPoint insertPoint;
752 CaseOp::create(builder, loc, value, kind, insertPoint);
753
754 {
755 mlir::OpBuilder::InsertionGuard guardSwitch(builder);
756 builder.restoreInsertionPoint(insertPoint);
757
758 if (isa<DefaultStmt>(sub) && isa<CaseStmt>(stmt)) {
759 subStmtKind = SubStmtKind::Default;
760 builder.createYield(loc);
761 } else if (isa<CaseStmt>(sub) && isa<DefaultStmt, CaseStmt>(stmt)) {
762 subStmtKind = SubStmtKind::Case;
763 builder.createYield(loc);
764 } else {
765 result = emitStmt(sub, /*useCurrentScope=*/!isa<CompoundStmt>(sub));
766 }
767
768 insertPoint = builder.saveInsertionPoint();
769 }
770
771 // If the substmt is default stmt or case stmt, try to handle the special case
772 // to make it into the simple form. e.g.
773 //
774 // switch () {
775 // case 1:
776 // default:
777 // ...
778 // }
779 //
780 // we prefer generating
781 //
782 // cir.switch() {
783 // cir.case(equal, 1) {
784 // cir.yield
785 // }
786 // cir.case(default) {
787 // ...
788 // }
789 // }
790 //
791 // than
792 //
793 // cir.switch() {
794 // cir.case(equal, 1) {
795 // cir.case(default) {
796 // ...
797 // }
798 // }
799 // }
800 //
801 // We don't need to revert this if we find the current switch can't be in
802 // simple form later since the conversion itself should be harmless.
803 if (subStmtKind == SubStmtKind::Case) {
804 result = emitCaseStmt(*cast<CaseStmt>(sub), condType, buildingTopLevelCase);
805 } else if (subStmtKind == SubStmtKind::Default) {
806 result = emitDefaultStmt(*cast<DefaultStmt>(sub), condType,
807 buildingTopLevelCase);
808 } else if (buildingTopLevelCase) {
809 // If we're building a top level case, try to restore the insert point to
810 // the case we're building, so that we can attach subsequent stmts to the
811 // case and keep the generated `cir.switch` operation in simple form.
812 builder.restoreInsertionPoint(insertPoint);
813 }
814
815 return result;
816}
817
818mlir::LogicalResult CIRGenFunction::emitCaseStmt(const CaseStmt &s,
819 mlir::Type condType,
820 bool buildingTopLevelCase) {
821 cir::CaseOpKind kind;
822 mlir::ArrayAttr value;
823 llvm::APSInt intVal = s.getLHS()->EvaluateKnownConstInt(getContext());
824
825 // If the case statement has an RHS value, it is representing a GNU
826 // case range statement, where LHS is the beginning of the range
827 // and RHS is the end of the range.
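// Illustrative only (not from this file): "case 1 ... 5:" is lowered below
// to a cir.case of kind Range whose value array holds the bounds 1 and 5.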
828 if (const Expr *rhs = s.getRHS()) {
829 llvm::APSInt endVal = rhs->EvaluateKnownConstInt(getContext());
830 value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal),
831 cir::IntAttr::get(condType, endVal)});
832 kind = cir::CaseOpKind::Range;
833 } else {
834 value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal)});
835 kind = cir::CaseOpKind::Equal;
836 }
837
838 return emitCaseDefaultCascade(&s, condType, value, kind,
839 buildingTopLevelCase);
840}
841
842mlir::LogicalResult CIRGenFunction::emitDefaultStmt(const DefaultStmt &s,
843 mlir::Type condType,
844 bool buildingTopLevelCase) {
845 return emitCaseDefaultCascade(&s, condType, builder.getArrayAttr({}),
846 cir::CaseOpKind::Default, buildingTopLevelCase);
847}
848
849mlir::LogicalResult CIRGenFunction::emitSwitchCase(const SwitchCase &s,
850 bool buildingTopLevelCase) {
851 assert(!condTypeStack.empty() &&
852 "build switch case without specifying the type of the condition");
853
854 if (s.getStmtClass() == Stmt::CaseStmtClass)
855 return emitCaseStmt(cast<CaseStmt>(s), condTypeStack.back(),
856 buildingTopLevelCase);
857
858 if (s.getStmtClass() == Stmt::DefaultStmtClass)
859 return emitDefaultStmt(cast<DefaultStmt>(s), condTypeStack.back(),
860 buildingTopLevelCase);
861
862 llvm_unreachable("expect case or default stmt");
863}
864
865mlir::LogicalResult
866CIRGenFunction::emitCXXForRangeStmt(const CXXForRangeStmt &s,
867 ArrayRef<const Attr *> forAttrs) {
868 cir::ForOp forOp;
869
870 // TODO(cir): pass in array of attributes.
871 auto forStmtBuilder = [&]() -> mlir::LogicalResult {
872 mlir::LogicalResult loopRes = mlir::success();
873 // Evaluate the first pieces before the loop.
874 if (s.getInit())
875 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
876 return mlir::failure();
877 if (emitStmt(s.getRangeStmt(), /*useCurrentScope=*/true).failed())
878 return mlir::failure();
879 if (emitStmt(s.getBeginStmt(), /*useCurrentScope=*/true).failed())
880 return mlir::failure();
881 if (emitStmt(s.getEndStmt(), /*useCurrentScope=*/true).failed())
882 return mlir::failure();
883
885 // From LLVM: if there are any cleanups between here and the loop-exit
886 // scope, create a block to stage a loop exit along.
887 // We probably already do the right thing because of ScopeOp, but make
888 // sure we handle all cases.
890
891 forOp = builder.createFor(
892 getLoc(s.getSourceRange()),
893 /*condBuilder=*/
894 [&](mlir::OpBuilder &b, mlir::Location loc) {
895 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
896 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
897 mlir::Value condVal = evaluateExprAsBool(s.getCond());
898 builder.createCondition(condVal);
899 },
900 /*bodyBuilder=*/
901 [&](mlir::OpBuilder &b, mlir::Location loc) {
902 // https://en.cppreference.com/w/cpp/language/for
903 // In C++ the scope of the init-statement and the scope of
904 // statement are one and the same.
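// Illustrative only (not from this file): in "for (int x : v) use(x);",
// the hidden __range/__begin/__end variables and the loop variable 'x'
// are emitted into this same scope, hence useCurrentScope below.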
905 bool useCurrentScope = true;
906 if (emitStmt(s.getLoopVarStmt(), useCurrentScope).failed())
907 loopRes = mlir::failure();
908 if (emitStmt(s.getBody(), useCurrentScope).failed())
909 loopRes = mlir::failure();
910 emitStopPoint(&s);
911 },
912 /*stepBuilder=*/
913 [&](mlir::OpBuilder &b, mlir::Location loc) {
914 if (s.getInc())
915 if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
916 loopRes = mlir::failure();
917 builder.createYield(loc);
918 });
919 return loopRes;
920 };
921
922 mlir::LogicalResult res = mlir::success();
923 mlir::Location scopeLoc = getLoc(s.getSourceRange());
924 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
925 [&](mlir::OpBuilder &b, mlir::Location loc) {
926 // Create a cleanup scope for the condition
927 // variable cleanups. Logical equivalent from
928 // LLVM codegen for LexicalScope
929 // ConditionScope(*this, S.getSourceRange())...
930 LexicalScope lexScope{*this, loc,
931 builder.getInsertionBlock()};
932 res = forStmtBuilder();
933 });
934
935 if (res.failed())
936 return res;
937
938 terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
939 return mlir::success();
940}
941
942mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &s) {
943 cir::ForOp forOp;
944
945 // TODO: pass in an array of attributes.
946 auto forStmtBuilder = [&]() -> mlir::LogicalResult {
947 mlir::LogicalResult loopRes = mlir::success();
948 // Evaluate the first part before the loop.
949 if (s.getInit())
950 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
951 return mlir::failure();
953 // In the classic codegen, if there are any cleanups between here and the
954 // loop-exit scope, a block is created to stage the loop exit. We probably
955 // already do the right thing because of ScopeOp, but we need more testing
956 // to be sure we handle all cases.
958
959 forOp = builder.createFor(
960 getLoc(s.getSourceRange()),
961 /*condBuilder=*/
962 [&](mlir::OpBuilder &b, mlir::Location loc) {
963 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
964 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
965 mlir::Value condVal;
966 if (s.getCond()) {
967 // If the for statement has a condition scope,
968 // emit the local variable declaration.
969 if (s.getConditionVariable())
970 emitDecl(*s.getConditionVariable());
971 // C99 6.8.5p2/p4: The first substatement is executed if the
972 // expression compares unequal to 0. The condition must be a
973 // scalar type.
974 condVal = evaluateExprAsBool(s.getCond());
975 } else {
976 condVal = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
977 }
978 builder.createCondition(condVal);
979 },
980 /*bodyBuilder=*/
981 [&](mlir::OpBuilder &b, mlir::Location loc) {
982 // The scope of the for loop body is nested within the scope of the
983 // for loop's init-statement and condition.
984 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
985 loopRes = mlir::failure();
986 emitStopPoint(&s);
987 },
988 /*stepBuilder=*/
989 [&](mlir::OpBuilder &b, mlir::Location loc) {
990 if (s.getInc())
991 if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
992 loopRes = mlir::failure();
993 builder.createYield(loc);
994 });
995 return loopRes;
996 };
997
998 auto res = mlir::success();
999 auto scopeLoc = getLoc(s.getSourceRange());
1000 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1001 [&](mlir::OpBuilder &b, mlir::Location loc) {
1002 LexicalScope lexScope{*this, loc,
1003 builder.getInsertionBlock()};
1004 res = forStmtBuilder();
1005 });
1006
1007 if (res.failed())
1008 return res;
1009
1010 terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
1011 return mlir::success();
1012}
1013
1014mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &s) {
1015 cir::DoWhileOp doWhileOp;
1016
1017 // TODO: pass in array of attributes.
1018 auto doStmtBuilder = [&]() -> mlir::LogicalResult {
1019 mlir::LogicalResult loopRes = mlir::success();
1021 // From LLVM: if there are any cleanups between here and the loop-exit
1022 // scope, create a block to stage a loop exit along.
1023 // We probably already do the right thing because of ScopeOp, but make
1024 // sure we handle all cases.
1026
1027 doWhileOp = builder.createDoWhile(
1028 getLoc(s.getSourceRange()),
1029 /*condBuilder=*/
1030 [&](mlir::OpBuilder &b, mlir::Location loc) {
1031 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
1032 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
1033 // C99 6.8.5p2/p4: The first substatement is executed if the
1034 // expression compares unequal to 0. The condition must be a
1035 // scalar type.
1036 mlir::Value condVal = evaluateExprAsBool(s.getCond());
1037 builder.createCondition(condVal);
1038 },
1039 /*bodyBuilder=*/
1040 [&](mlir::OpBuilder &b, mlir::Location loc) {
1041 // The scope of the do-while loop body is a nested scope.
1042 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
1043 loopRes = mlir::failure();
1044 emitStopPoint(&s);
1045 });
1046 return loopRes;
1047 };
1048
1049 mlir::LogicalResult res = mlir::success();
1050 mlir::Location scopeLoc = getLoc(s.getSourceRange());
1051 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1052 [&](mlir::OpBuilder &b, mlir::Location loc) {
1053 LexicalScope lexScope{*this, loc,
1054 builder.getInsertionBlock()};
1055 res = doStmtBuilder();
1056 });
1057
1058 if (res.failed())
1059 return res;
1060
1061 terminateBody(builder, doWhileOp.getBody(), getLoc(s.getEndLoc()));
1062 return mlir::success();
1063}
1064
1065mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &s) {
1066 cir::WhileOp whileOp;
1067
1068 // TODO: pass in array of attributes.
1069 auto whileStmtBuilder = [&]() -> mlir::LogicalResult {
1070 mlir::LogicalResult loopRes = mlir::success();
1072 // From LLVM: if there are any cleanups between here and the loop-exit
1073 // scope, create a block to stage a loop exit along.
1074 // We probably already do the right thing because of ScopeOp, but make
1075 // sure we handle all cases.
1077
1078 whileOp = builder.createWhile(
1079 getLoc(s.getSourceRange()),
1080 /*condBuilder=*/
1081 [&](mlir::OpBuilder &b, mlir::Location loc) {
1082 assert(!cir::MissingFeatures::createProfileWeightsForLoop());
1083 assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
1084 mlir::Value condVal;
1085 // If the for statement has a condition scope,
1086 // emit the local variable declaration.
1087 if (s.getConditionVariable())
1088 emitDecl(*s.getConditionVariable());
1089 // C99 6.8.5p2/p4: The first substatement is executed if the
1090 // expression compares unequal to 0. The condition must be a
1091 // scalar type.
1092 condVal = evaluateExprAsBool(s.getCond());
1093 builder.createCondition(condVal);
1094 },
1095 /*bodyBuilder=*/
1096 [&](mlir::OpBuilder &b, mlir::Location loc) {
1097 // The scope of the while loop body is a nested scope.
1098 if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
1099 loopRes = mlir::failure();
1100 emitStopPoint(&s);
1101 });
1102 return loopRes;
1103 };
1104
1105 mlir::LogicalResult res = mlir::success();
1106 mlir::Location scopeLoc = getLoc(s.getSourceRange());
1107 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1108 [&](mlir::OpBuilder &b, mlir::Location loc) {
1109 LexicalScope lexScope{*this, loc,
1110 builder.getInsertionBlock()};
1111 res = whileStmtBuilder();
1112 });
1113
1114 if (res.failed())
1115 return res;
1116
1117 terminateBody(builder, whileOp.getBody(), getLoc(s.getEndLoc()));
1118 return mlir::success();
1119}
1120
1121mlir::LogicalResult CIRGenFunction::emitSwitchBody(const Stmt *s) {
1122 // It is rare but legal if the switch body is not a compound stmt. e.g.,
1123 //
1124 // switch(a)
1125 // while(...) {
1126 // case1
1127 // ...
1128 // case2
1129 // ...
1130 // }
1131 if (!isa<CompoundStmt>(s))
1132 return emitStmt(s, /*useCurrentScope=*/true);
1133
1134 auto *compoundStmt = cast<CompoundStmt>(s);
1135
1136 mlir::Block *switchBlock = builder.getBlock();
1137 for (auto *c : compoundStmt->body()) {
1138 if (auto *switchCase = dyn_cast<SwitchCase>(c)) {
1139 builder.setInsertionPointToEnd(switchBlock);
1140 // The insert point is reset automatically, so that following stmts can
1141 // be attached to the region of the previously built case op, keeping
1142 // the generated `cir.switch` in simple form.
1143 if (mlir::failed(
1144 emitSwitchCase(*switchCase, /*buildingTopLevelCase=*/true)))
1145 return mlir::failure();
1146
1147 continue;
1148 }
1149
1150 // Otherwise, just build the statements in the nearest case region.
1151 if (mlir::failed(emitStmt(c, /*useCurrentScope=*/!isa<CompoundStmt>(c))))
1152 return mlir::failure();
1153 }
1154
1155 return mlir::success();
1156}
1157
1158mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const SwitchStmt &s) {
1159 // TODO: LLVM codegen does some early optimization to fold the condition and
1160 // only emit live cases. CIR should use MLIR to achieve similar things,
1161 // nothing to be done here.
1162 // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))...
1163 assert(!cir::MissingFeatures::constantFoldSwitchStatement());
1164
1165 SwitchOp swop;
1166 auto switchStmtBuilder = [&]() -> mlir::LogicalResult {
1167 if (s.getInit())
1168 if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
1169 return mlir::failure();
1170
1171 if (s.getConditionVariable())
1172 emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);
1173
1174 mlir::Value condV = emitScalarExpr(s.getCond());
1175
1176 // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts())
1179 // TODO: if the switch has a condition wrapped by __builtin_unpredictable?
1181
1182 mlir::LogicalResult res = mlir::success();
1183 swop = SwitchOp::create(
1184 builder, getLoc(s.getBeginLoc()), condV,
1185 /*switchBuilder=*/
1186 [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
1187 curLexScope->setAsSwitch();
1188
1189 condTypeStack.push_back(condV.getType());
1190
1191 res = emitSwitchBody(s.getBody());
1192
1193 condTypeStack.pop_back();
1194 });
1195
1196 return res;
1197 };
1198
1199 // The switch scope contains the full source range for SwitchStmt.
1200 mlir::Location scopeLoc = getLoc(s.getSourceRange());
1201 mlir::LogicalResult res = mlir::success();
1202 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
1203 [&](mlir::OpBuilder &b, mlir::Location loc) {
1204 LexicalScope lexScope{*this, loc,
1205 builder.getInsertionBlock()};
1206 res = switchStmtBuilder();
1207 });
1208
1209 llvm::SmallVector<CaseOp> cases;
1210 swop.collectCases(cases);
1211 for (auto caseOp : cases)
1212 terminateBody(builder, caseOp.getCaseRegion(), caseOp.getLoc());
1213 terminateBody(builder, swop.getBody(), swop.getLoc());
1214
1215 swop.setAllEnumCasesCovered(s.isAllEnumCasesCovered());
1216
1217 return res;
1218}
1219
1220void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
1221 QualType ty) {
1222 if (rv.isScalar()) {
1223 builder.createStore(loc, rv.getValue(), returnValue);
1224 } else if (rv.isAggregate()) {
1225 LValue dest = makeAddrLValue(returnValue, ty);
1226 LValue src = makeAddrLValue(rv.getAggregateAddress(), ty);
1227 emitAggregateCopy(dest, src, ty, getOverlapForReturnValue());
1228 } else {
1229 cgm.errorNYI(loc, "emitReturnOfRValue: complex return type");
1230 }
1231 mlir::Block *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
1233 cir::BrOp::create(builder, loc, retBlock);
1234 if (ehStack.stable_begin() != currentCleanupStackDepth)
1235 cgm.errorNYI(loc, "return of r-value with cleanup stack");
1236}