From 54185d74fb29f1a822a42800a0c1d080f4de1c5e Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 17 Apr 2024 22:59:32 +0300 Subject: [PATCH] [CIR][CodeGen] Flattening for ScopeOp and LoopOpInterface (#546) This PR is the next step towards goto support and adds flattening for `ScopeOp` and `LoopOpInterface`. Looks like I can't separate these operations and create two PRs, since some errors occur if I do so, e.g. `reference to block defined in another region`. Seems we need to flatten both operations at the same time. Given it's a copy-pasta, I think there is no need to try to make several PRs. I added several tests - just copied them from the lowering part just to demonstrate what it looks like. Note that the changes in `dot.cir` are caused by `BrCondOp` updates in the previous PR, where we removed the following casts: ``` %20 = llvm.zext %19 : i1 to i8 %21 = llvm.trunc %20 : i8 to i1 llvm.cond_br %21 ... ``` --- .../lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 158 +++++++++++++++- .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 168 ++---------------- clang/test/CIR/CodeGen/loop.cir | 122 +++++++++++++ clang/test/CIR/CodeGen/scope.cir | 60 +++++++ clang/test/CIR/Lowering/dot.cir | 44 +++-- 5 files changed, 376 insertions(+), 176 deletions(-) create mode 100644 clang/test/CIR/CodeGen/loop.cir create mode 100644 clang/test/CIR/CodeGen/scope.cir diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp index aa061c51680c..b9c9481805d7 100644 --- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp +++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp @@ -24,6 +24,28 @@ using namespace mlir::cir; namespace { +/// Lowers operations with the terminator trait that have a single successor. 
+void lowerTerminator(mlir::Operation *op, mlir::Block *dest, + mlir::PatternRewriter &rewriter) { + assert(op->hasTrait() && "not a terminator"); + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, dest); +} + +/// Walks a region while skipping operations of type `Ops`. This ensures the +/// callback is not applied to said operations and its children. +template +void walkRegionSkipping(mlir::Region ®ion, + mlir::function_ref callback) { + region.walk([&](mlir::Operation *op) { + if (isa(op)) + return mlir::WalkResult::skip(); + callback(op); + return mlir::WalkResult::advance(); + }); +} + struct FlattenCFGPass : public FlattenCFGBase { FlattenCFGPass() = default; @@ -92,8 +114,140 @@ struct CIRIfFlattening : public OpRewritePattern { } }; +class CIRScopeOpFlattening : public mlir::OpRewritePattern { +public: + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::ScopeOp scopeOp, + mlir::PatternRewriter &rewriter) const override { + mlir::OpBuilder::InsertionGuard guard(rewriter); + auto loc = scopeOp.getLoc(); + + // Empty scope: just remove it. + if (scopeOp.getRegion().empty()) { + rewriter.eraseOp(scopeOp); + return mlir::success(); + } + + // Split the current block before the ScopeOp to create the inlining + // point. + auto *currentBlock = rewriter.getInsertionBlock(); + auto *remainingOpsBlock = + rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); + mlir::Block *continueBlock; + if (scopeOp.getNumResults() == 0) + continueBlock = remainingOpsBlock; + else + llvm_unreachable("NYI"); + + // Inline body region. + auto *beforeBody = &scopeOp.getRegion().front(); + auto *afterBody = &scopeOp.getRegion().back(); + rewriter.inlineRegionBefore(scopeOp.getRegion(), continueBlock); + + // Save stack and then branch into the body of the region. 
+ rewriter.setInsertionPointToEnd(currentBlock); + // TODO(CIR): stackSaveOp + // auto stackSaveOp = rewriter.create( + // loc, mlir::LLVM::LLVMPointerType::get( + // mlir::IntegerType::get(scopeOp.getContext(), 8))); + rewriter.create(loc, mlir::ValueRange(), beforeBody); + + // Replace the scopeop return with a branch that jumps out of the body. + // Stack restore before leaving the body region. + rewriter.setInsertionPointToEnd(afterBody); + if (auto yieldOp = + dyn_cast(afterBody->getTerminator())) { + rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getArgs(), + continueBlock); + } + + // TODO(cir): stackrestore? + + // Replace the op with values return from the body region. + rewriter.replaceOp(scopeOp, continueBlock->getArguments()); + + return mlir::success(); + } +}; + +class CIRLoopOpInterfaceFlattening + : public mlir::OpInterfaceRewritePattern { +public: + using mlir::OpInterfaceRewritePattern< + mlir::cir::LoopOpInterface>::OpInterfaceRewritePattern; + + inline void lowerConditionOp(mlir::cir::ConditionOp op, mlir::Block *body, + mlir::Block *exit, + mlir::PatternRewriter &rewriter) const { + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(op); + rewriter.replaceOpWithNewOp(op, op.getCondition(), + body, exit); + } + + mlir::LogicalResult + matchAndRewrite(mlir::cir::LoopOpInterface op, + mlir::PatternRewriter &rewriter) const final { + // Setup CFG blocks. + auto *entry = rewriter.getInsertionBlock(); + auto *exit = rewriter.splitBlock(entry, rewriter.getInsertionPoint()); + auto *cond = &op.getCond().front(); + auto *body = &op.getBody().front(); + auto *step = (op.maybeGetStep() ? &op.maybeGetStep()->front() : nullptr); + + // Setup loop entry branch. + rewriter.setInsertionPointToEnd(entry); + rewriter.create(op.getLoc(), &op.getEntry().front()); + + // Branch from condition region to body or exit. 
+ auto conditionOp = cast(cond->getTerminator()); + lowerConditionOp(conditionOp, body, exit, rewriter); + + // TODO(cir): Remove the walks below. It visits operations unnecessarily, + // however, to solve this we would likely need a custom DialectConversion + // driver to customize the order that operations are visited. + + // Lower continue statements. + mlir::Block *dest = (step ? step : cond); + op.walkBodySkippingNestedLoops([&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, dest, rewriter); + }); + + // Lower break statements. + walkRegionSkipping( + op.getBody(), [&](mlir::Operation *op) { + if (isa(op)) + lowerTerminator(op, exit, rewriter); + }); + + // Lower optional body region yield. + auto bodyYield = dyn_cast(body->getTerminator()); + if (bodyYield) + lowerTerminator(bodyYield, (step ? step : cond), rewriter); + + // Lower mandatory step region yield. + if (step) + lowerTerminator(cast(step->getTerminator()), cond, + rewriter); + + // Move region contents out of the loop op. + rewriter.inlineRegionBefore(op.getCond(), exit); + rewriter.inlineRegionBefore(op.getBody(), exit); + if (step) + rewriter.inlineRegionBefore(*op.maybeGetStep(), exit); + + rewriter.eraseOp(op); + return mlir::success(); + } +}; + void populateFlattenCFGPatterns(RewritePatternSet &patterns) { - patterns.add(patterns.getContext()); + patterns + .add( + patterns.getContext()); } void FlattenCFGPass::runOnOperation() { @@ -103,7 +257,7 @@ void FlattenCFGPass::runOnOperation() { // Collect operations to apply patterns. 
SmallVector ops; getOperation()->walk([&](Operation *op) { - if (isa(op)) + if (isa(op)) ops.push_back(op); }); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 517b306b6d09..bf101d015a18 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -481,80 +481,6 @@ class CIRPtrStrideOpLowering } }; -class CIRLoopOpInterfaceLowering - : public mlir::OpInterfaceConversionPattern { -public: - using mlir::OpInterfaceConversionPattern< - mlir::cir::LoopOpInterface>::OpInterfaceConversionPattern; - - inline void - lowerConditionOp(mlir::cir::ConditionOp op, mlir::Block *body, - mlir::Block *exit, - mlir::ConversionPatternRewriter &rewriter) const { - mlir::OpBuilder::InsertionGuard guard(rewriter); - rewriter.setInsertionPoint(op); - rewriter.replaceOpWithNewOp(op, op.getCondition(), - body, exit); - } - - mlir::LogicalResult - matchAndRewrite(mlir::cir::LoopOpInterface op, - mlir::ArrayRef operands, - mlir::ConversionPatternRewriter &rewriter) const final { - // Setup CFG blocks. - auto *entry = rewriter.getInsertionBlock(); - auto *exit = rewriter.splitBlock(entry, rewriter.getInsertionPoint()); - auto *cond = &op.getCond().front(); - auto *body = &op.getBody().front(); - auto *step = (op.maybeGetStep() ? &op.maybeGetStep()->front() : nullptr); - - // Setup loop entry branch. - rewriter.setInsertionPointToEnd(entry); - rewriter.create(op.getLoc(), &op.getEntry().front()); - - // Branch from condition region to body or exit. - auto conditionOp = cast(cond->getTerminator()); - lowerConditionOp(conditionOp, body, exit, rewriter); - - // TODO(cir): Remove the walks below. It visits operations unnecessarily, - // however, to solve this we would likely need a custom DialecConversion - // driver to customize the order that operations are visited. - - // Lower continue statements. - mlir::Block *dest = (step ? 
step : cond); - op.walkBodySkippingNestedLoops([&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, dest, rewriter); - }); - - // Lower break statements. - walkRegionSkipping( - op.getBody(), [&](mlir::Operation *op) { - if (isa(op)) - lowerTerminator(op, exit, rewriter); - }); - - // Lower optional body region yield. - auto bodyYield = dyn_cast(body->getTerminator()); - if (bodyYield) - lowerTerminator(bodyYield, (step ? step : cond), rewriter); - - // Lower mandatory step region yield. - if (step) - lowerTerminator(cast(step->getTerminator()), cond, - rewriter); - - // Move region contents out of the loop op. - rewriter.inlineRegionBefore(op.getCond(), exit); - rewriter.inlineRegionBefore(op.getBody(), exit); - if (step) - rewriter.inlineRegionBefore(*op.maybeGetStep(), exit); - - rewriter.eraseOp(op); - return mlir::success(); - } -}; - class CIRBrCondOpLowering : public mlir::OpConversionPattern { public: @@ -785,65 +711,6 @@ class CIRCastOpLowering : public mlir::OpConversionPattern { } }; -class CIRScopeOpLowering - : public mlir::OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - mlir::LogicalResult - matchAndRewrite(mlir::cir::ScopeOp scopeOp, OpAdaptor adaptor, - mlir::ConversionPatternRewriter &rewriter) const override { - mlir::OpBuilder::InsertionGuard guard(rewriter); - auto loc = scopeOp.getLoc(); - - // Empty scope: just remove it. - if (scopeOp.getRegion().empty()) { - rewriter.eraseOp(scopeOp); - return mlir::success(); - } - - // Split the current block before the ScopeOp to create the inlining - // point. - auto *currentBlock = rewriter.getInsertionBlock(); - auto *remainingOpsBlock = - rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint()); - mlir::Block *continueBlock; - if (scopeOp.getNumResults() == 0) - continueBlock = remainingOpsBlock; - else - llvm_unreachable("NYI"); - - // Inline body region. 
- auto *beforeBody = &scopeOp.getRegion().front(); - auto *afterBody = &scopeOp.getRegion().back(); - rewriter.inlineRegionBefore(scopeOp.getRegion(), continueBlock); - - // Save stack and then branch into the body of the region. - rewriter.setInsertionPointToEnd(currentBlock); - // TODO(CIR): stackSaveOp - // auto stackSaveOp = rewriter.create( - // loc, mlir::LLVM::LLVMPointerType::get( - // mlir::IntegerType::get(scopeOp.getContext(), 8))); - rewriter.create(loc, mlir::ValueRange(), beforeBody); - - // Replace the scopeop return with a branch that jumps out of the body. - // Stack restore before leaving the body region. - rewriter.setInsertionPointToEnd(afterBody); - if (auto yieldOp = - dyn_cast(afterBody->getTerminator())) { - rewriter.replaceOpWithNewOp(yieldOp, yieldOp.getArgs(), - continueBlock); - } - - // TODO(cir): stackrestore? - - // Replace the op with values return from the body region. - rewriter.replaceOp(scopeOp, continueBlock->getArguments()); - - return mlir::success(); - } -}; - class CIRReturnLowering : public mlir::OpConversionPattern { public: @@ -3082,23 +2949,22 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRCmpOpLowering, CIRBitClrsbOpLowering, CIRBitClzOpLowering, CIRBitCtzOpLowering, CIRBitFfsOpLowering, CIRBitParityOpLowering, CIRBitPopcountOpLowering, CIRAtomicFetchLowering, CIRByteswapOpLowering, - CIRLoopOpInterfaceLowering, CIRBrCondOpLowering, CIRPtrStrideOpLowering, - CIRCallLowering, CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, - CIRLoadLowering, CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, - CIRFuncLowering, CIRScopeOpLowering, CIRCastOpLowering, - CIRGlobalOpLowering, CIRGetGlobalOpLowering, CIRVAStartLowering, - CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, CIRBrOpLowering, - CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, - CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, - CIRFAbsOpLowering, CIRExpectOpLowering, 
CIRVTableAddrPointOpLowering, - CIRVectorCreateLowering, CIRVectorInsertLowering, - CIRVectorExtractLowering, CIRVectorCmpOpLowering, CIRVectorSplatLowering, - CIRVectorTernaryLowering, CIRVectorShuffleIntsLowering, - CIRVectorShuffleVecLowering, CIRStackSaveLowering, - CIRStackRestoreLowering, CIRUnreachableLowering, CIRTrapLowering, - CIRInlineAsmOpLowering, CIRSetBitfieldLowering, CIRGetBitfieldLowering, - CIRPrefetchLowering, CIRObjSizeOpLowering, CIRIsConstantOpLowering>( - converter, patterns.getContext()); + CIRBrCondOpLowering, CIRPtrStrideOpLowering, CIRCallLowering, + CIRUnaryOpLowering, CIRBinOpLowering, CIRShiftOpLowering, CIRLoadLowering, + CIRConstantLowering, CIRStoreLowering, CIRAllocaLowering, CIRFuncLowering, + CIRCastOpLowering, CIRGlobalOpLowering, CIRGetGlobalOpLowering, + CIRVAStartLowering, CIRVAEndLowering, CIRVACopyLowering, CIRVAArgLowering, + CIRBrOpLowering, CIRTernaryOpLowering, CIRGetMemberOpLowering, + CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, + CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRExpectOpLowering, + CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, + CIRVectorInsertLowering, CIRVectorExtractLowering, CIRVectorCmpOpLowering, + CIRVectorSplatLowering, CIRVectorTernaryLowering, + CIRVectorShuffleIntsLowering, CIRVectorShuffleVecLowering, + CIRStackSaveLowering, CIRStackRestoreLowering, CIRUnreachableLowering, + CIRTrapLowering, CIRInlineAsmOpLowering, CIRSetBitfieldLowering, + CIRGetBitfieldLowering, CIRPrefetchLowering, CIRObjSizeOpLowering, + CIRIsConstantOpLowering>(converter, patterns.getContext()); } namespace { @@ -3266,7 +3132,7 @@ static void buildCtorDtorList( // pass it will be placed into the unreachable block. And the possible error // after the lowering pass is: error: 'cir.return' op expects parent op to be // one of 'cir.func, cir.scope, cir.if ... The reason that this operation was -// not lowered and the new parent is lllvm.func. +// not lowered and the new parent is llvm.func. 
// // In the future we may want to get rid of this function and use DCE pass or // something similar. But now we need to guarantee the absence of the dialect diff --git a/clang/test/CIR/CodeGen/loop.cir b/clang/test/CIR/CodeGen/loop.cir new file mode 100644 index 000000000000..8204216b6f52 --- /dev/null +++ b/clang/test/CIR/CodeGen/loop.cir @@ -0,0 +1,122 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!s32i = !cir.int + +module { + + cir.func @testFor(%arg0 : !cir.bool) { + cir.for : cond { + cir.condition(%arg0) + } body { + cir.yield + } step { + cir.yield + } + cir.return + } +// CHECK: cir.func @testFor(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // Test while cir.loop operation lowering. + cir.func @testWhile(%arg0 : !cir.bool) { + cir.while { + cir.condition(%arg0) + } do { + cir.yield + } + cir.return + } +// CHECK: cir.func @testWhile(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // Test do-while cir.loop operation lowering. 
+ cir.func @testDoWhile(%arg0 : !cir.bool) { + cir.do { + cir.yield + } while { + cir.condition(%arg0) + } + cir.return + } +// CHECK: cir.func @testDoWhile(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#BODY:]] +// CHECK: ^bb[[#COND:]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // test corner case + // while (1) { + // break; + // } + cir.func @testWhileWithBreakTerminatedBody(%arg0 : !cir.bool) { + cir.while { + cir.condition(%arg0) + } do { + cir.break + } + cir.return + } +// CHECK: cir.func @testWhileWithBreakTerminatedBody(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#EXIT]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + + // test C only corner case - no fails during the lowering + // for (;;) { + // break; + // } + cir.func @forWithBreakTerminatedScopeInBody(%arg0 : !cir.bool) { + cir.for : cond { + cir.condition(%arg0) + } body { + cir.scope { // FIXME(cir): Redundant scope emitted during C codegen. 
+ cir.break + } + cir.yield + } step { + cir.yield + } + cir.return + } +// CHECK: cir.func @forWithBreakTerminatedScopeInBody(%arg0: !cir.bool) { +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#COND]]: +// CHECK: cir.brcond %arg0 ^bb[[#BODY:]], ^bb[[#EXIT:]] +// CHECK: ^bb[[#BODY]]: +// CHECK: cir.br ^bb[[#EX_SCOPE_IN:]] +// CHECK: ^bb[[#EX_SCOPE_IN]]: +// CHECK: cir.br ^bb[[#EXIT:]] +// CHECK: ^bb[[#EX_SCOPE_EXIT:]]: +// CHECK: cir.br ^bb[[#STEP:]] +// CHECK: ^bb[[#STEP]]: +// CHECK: cir.br ^bb[[#COND:]] +// CHECK: ^bb[[#EXIT]]: +// CHECK: cir.return +// CHECK: } + +} \ No newline at end of file diff --git a/clang/test/CIR/CodeGen/scope.cir b/clang/test/CIR/CodeGen/scope.cir new file mode 100644 index 000000000000..813862e7c2fb --- /dev/null +++ b/clang/test/CIR/CodeGen/scope.cir @@ -0,0 +1,60 @@ +// RUN: cir-opt %s -cir-flatten-cfg -o - | FileCheck %s + +!u32i = !cir.int + +module { + cir.func @foo() { + cir.scope { + %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<4> : !u32i) : !u32i + cir.store %1, %0 : !u32i, cir.ptr + } + cir.return + } +// CHECK: cir.func @foo() { +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["a", init] {alignment = 4 : i64} +// CHECK: %1 = cir.const(#cir.int<4> : !u32i) : !u32i +// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: cir.br ^bb2 +// CHECK: ^bb2: // pred: ^bb1 +// CHECK: cir.return +// CHECK: } + + // Should drop empty scopes. 
+ cir.func @empty_scope() { + cir.scope { + } + cir.return + } +// CHECK: cir.func @empty_scope() { +// CHECK: cir.return +// CHECK: } + + cir.func @scope_with_return() -> !u32i { + %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} + cir.scope { + %2 = cir.const(#cir.int<0> : !u32i) : !u32i + cir.store %2, %0 : !u32i, cir.ptr + %3 = cir.load %0 : cir.ptr , !u32i + cir.return %3 : !u32i + } + %1 = cir.load %0 : cir.ptr , !u32i + cir.return %1 : !u32i + } + +// CHECK: cir.func @scope_with_return() -> !u32i { +// CHECK: %0 = cir.alloca !u32i, cir.ptr , ["__retval"] {alignment = 4 : i64} +// CHECK: cir.br ^bb1 +// CHECK: ^bb1: // pred: ^bb0 +// CHECK: %1 = cir.const(#cir.int<0> : !u32i) : !u32i +// CHECK: cir.store %1, %0 : !u32i, cir.ptr +// CHECK: %2 = cir.load %0 : cir.ptr , !u32i +// CHECK: cir.return %2 : !u32i +// CHECK: ^bb2: // no predecessors +// CHECK: %3 = cir.load %0 : cir.ptr , !u32i +// CHECK: cir.return %3 : !u32i +// CHECK: } + +} diff --git a/clang/test/CIR/Lowering/dot.cir b/clang/test/CIR/Lowering/dot.cir index 02fb1c92affb..5b7742fc1400 100644 --- a/clang/test/CIR/Lowering/dot.cir +++ b/clang/test/CIR/Lowering/dot.cir @@ -83,35 +83,33 @@ module { // MLIR-NEXT: %17 = llvm.zext %16 : i1 to i32 // MLIR-NEXT: %18 = llvm.mlir.constant(0 : i32) : i32 // MLIR-NEXT: %19 = llvm.icmp "ne" %17, %18 : i32 -// MLIR-NEXT: %20 = llvm.zext %19 : i1 to i8 -// MLIR-NEXT: %21 = llvm.trunc %20 : i8 to i1 -// MLIR-NEXT: llvm.cond_br %21, ^bb3, ^bb5 +// MLIR-NEXT: llvm.cond_br %19, ^bb3, ^bb5 // MLIR-NEXT: ^bb3: // pred: ^bb2 -// MLIR-NEXT: %22 = llvm.load %1 : !llvm.ptr -> !llvm.ptr -// MLIR-NEXT: %23 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %24 = llvm.getelementptr %22[%23] : (!llvm.ptr, i32) -> !llvm.ptr, f64 -// MLIR-NEXT: %25 = llvm.load %24 : !llvm.ptr -> f64 -// MLIR-NEXT: %26 = llvm.load %3 : !llvm.ptr -> !llvm.ptr -// MLIR-NEXT: %27 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %28 = llvm.getelementptr %26[%27] : (!llvm.ptr, i32) 
-> !llvm.ptr, f64 -// MLIR-NEXT: %29 = llvm.load %28 : !llvm.ptr -> f64 -// MLIR-NEXT: %30 = llvm.fmul %25, %29 : f64 -// MLIR-NEXT: %31 = llvm.load %9 : !llvm.ptr -> f64 -// MLIR-NEXT: %32 = llvm.fadd %31, %30 : f64 -// MLIR-NEXT: llvm.store %32, %9 : f64, !llvm.ptr +// MLIR-NEXT: %20 = llvm.load %1 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %21 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %22 = llvm.getelementptr %20[%21] : (!llvm.ptr, i32) -> !llvm.ptr, f64 +// MLIR-NEXT: %23 = llvm.load %22 : !llvm.ptr -> f64 +// MLIR-NEXT: %24 = llvm.load %3 : !llvm.ptr -> !llvm.ptr +// MLIR-NEXT: %25 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %26 = llvm.getelementptr %24[%25] : (!llvm.ptr, i32) -> !llvm.ptr, f64 +// MLIR-NEXT: %27 = llvm.load %26 : !llvm.ptr -> f64 +// MLIR-NEXT: %28 = llvm.fmul %23, %27 : f64 +// MLIR-NEXT: %29 = llvm.load %9 : !llvm.ptr -> f64 +// MLIR-NEXT: %30 = llvm.fadd %29, %28 : f64 +// MLIR-NEXT: llvm.store %30, %9 : f64, !llvm.ptr // MLIR-NEXT: llvm.br ^bb4 // MLIR-NEXT: ^bb4: // pred: ^bb3 -// MLIR-NEXT: %33 = llvm.load %12 : !llvm.ptr -> i32 -// MLIR-NEXT: %34 = llvm.mlir.constant(1 : i32) : i32 -// MLIR-NEXT: %35 = llvm.add %33, %34 : i32 -// MLIR-NEXT: llvm.store %35, %12 : i32, !llvm.ptr +// MLIR-NEXT: %31 = llvm.load %12 : !llvm.ptr -> i32 +// MLIR-NEXT: %32 = llvm.mlir.constant(1 : i32) : i32 +// MLIR-NEXT: %33 = llvm.add %31, %32 : i32 +// MLIR-NEXT: llvm.store %33, %12 : i32, !llvm.ptr // MLIR-NEXT: llvm.br ^bb2 // MLIR-NEXT: ^bb5: // pred: ^bb2 // MLIR-NEXT: llvm.br ^bb6 // MLIR-NEXT: ^bb6: // pred: ^bb5 -// MLIR-NEXT: %36 = llvm.load %9 : !llvm.ptr -> f64 -// MLIR-NEXT: llvm.store %36, %7 : f64, !llvm.ptr -// MLIR-NEXT: %37 = llvm.load %7 : !llvm.ptr -> f64 -// MLIR-NEXT: llvm.return %37 : f64 +// MLIR-NEXT: %34 = llvm.load %9 : !llvm.ptr -> f64 +// MLIR-NEXT: llvm.store %34, %7 : f64, !llvm.ptr +// MLIR-NEXT: %35 = llvm.load %7 : !llvm.ptr -> f64 +// MLIR-NEXT: llvm.return %35 : f64 // MLIR-NEXT: } // MLIR-NEXT: }