@@ -37,13 +37,12 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc,
                                                    JumpDest Dest) {
  // Remove this once we go for making sure unreachable code is
  // well modeled (or not).
-  assert(builder.getInsertionBlock() && "not yet implemented");
  assert(!cir::MissingFeatures::ehStack());

  // Insert a branch: to the cleanup block (unsolved) or to the already
  // materialized label. Keep track of unsolved goto's.
-  auto brOp = builder.create<BrOp>(
-      Loc, Dest.isValid() ? Dest.getBlock() : ReturnBlock().getBlock());
+  assert(Dest.getBlock() && "assumes incoming valid dest");
+  auto brOp = builder.create<BrOp>(Loc, Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
@@ -70,7 +69,33 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc,
    return brOp;
  }

-  // FIXME(cir): otherwise, thread through all the normal cleanups in scope.
+  // Otherwise, thread through all the normal cleanups in scope.
+  auto index = builder.getUInt32(Dest.getDestIndex(), Loc);
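+  // The index uniquely tags this destination; in classic codegen the
+  // cleanup's exit switch keys on it to decide where to resume once the
+  // cleanup body has run.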
+  assert(!cir::MissingFeatures::cleanupIndexAndBIAdjustment());
+
+  // Add this destination to all the scopes involved.
+  EHScopeStack::stable_iterator I = TopCleanup;
+  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
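+  // E strictly encloses I exactly when the destination scope lies outside
+  // the innermost active cleanup, i.e. the branch has at least one normal
+  // cleanup to thread through.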
+  if (E.strictlyEncloses(I)) {
+    while (true) {
+      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+      assert(Scope.isNormalCleanup());
+      I = Scope.getEnclosingNormalCleanup();
+
+      // If this is the last cleanup we're propagating through, tell it
+      // that there's a resolved jump moving through it.
+      if (!E.strictlyEncloses(I)) {
+        Scope.addBranchAfter(index, Dest.getBlock());
+        break;
+      }
+
+      // Otherwise, tell the scope that there's a jump propagating
+      // through it. If this isn't new information, all the rest of
+      // the work has been done before.
+      if (!Scope.addBranchThrough(Dest.getBlock()))
+        break;
+    }
+  }

  return brOp;
}
@@ -305,6 +330,18 @@ static void emitCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn,
  // No need to emit continuation block because CIR uses a cir.if.
}

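+/// Lazily creates the entry block for a normal cleanup scope, caching it on
+/// the scope so later branches reuse the same block.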
+static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
+                                      EHCleanupScope &scope) {
+  assert(scope.isNormalCleanup());
+  mlir::Block *entry = scope.getNormalBlock();
+  if (!entry) {
+    mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+    entry = cgf.currLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+    scope.setNormalBlock(entry);
+  }
+  return entry;
+}
+
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
@@ -341,7 +378,8 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {

  // - whether there's a fallthrough
  auto *FallthroughSource = builder.getInsertionBlock();
-  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);
+  bool HasFallthrough =
+      (FallthroughSource != nullptr && (IsActive || HasExistingBranches));

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope. The
@@ -442,7 +480,131 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
      // Otherwise, the best approach is to thread everything through
      // the cleanup block and then try to clean up after ourselves.
    } else {
-      llvm_unreachable("NYI");
+      // Force the entry block to exist.
+      mlir::Block *normalEntry = createNormalEntry(*this, Scope);
+
+      // I. Set up the fallthrough edge in.
+      mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
+
+      // If there's a fallthrough, we need to store the cleanup
+      // destination index. For fall-throughs this is always zero.
+      if (HasFallthrough) {
+        if (!HasPrebranchedFallthrough) {
+          assert(!cir::MissingFeatures::cleanupDestinationIndex());
+        }
+
+        // Otherwise, save and clear the IP if we don't have fallthrough
+        // because the cleanup is inactive.
+      } else if (FallthroughSource) {
+        assert(!IsActive && "source without fallthrough for active cleanup");
+        savedInactiveFallthroughIP = getBuilder().saveInsertionPoint();
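+        // This saved IP is restored once the cleanup has been emitted
+        // (fallthrough case 1 under V below).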
+      }
+
+      // II. Emit the entry block. This implicitly branches to it if
+      // we have fallthrough. All the fixups and existing branches
+      // should already be branched to it.
+      builder.setInsertionPointToEnd(normalEntry);
+
+      // intercept normal cleanup to mark SEH scope end
+      if (IsEHa) {
+        llvm_unreachable("NYI");
+      }
+
+      // III. Figure out where we're going and build the cleanup
+      // epilogue.
+      bool HasEnclosingCleanups =
+          (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+      // Compute the branch-through dest if we need it:
+      //   - if there are branch-throughs threaded through the scope
+      //   - if fall-through is a branch-through
+      //   - if there are fixups that will be optimistically forwarded
+      //     to the enclosing cleanup
+      mlir::Block *branchThroughDest = nullptr;
+      if (Scope.hasBranchThroughs() ||
+          (FallthroughSource && FallthroughIsBranchThrough) ||
+          (HasFixups && HasEnclosingCleanups)) {
+        llvm_unreachable("NYI");
+      }
+
+      mlir::Block *fallthroughDest = nullptr;
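+      // fallthroughDest is only assigned on paths that are still NYI here
+      // (e.g. when the fallthrough must stop at this cleanup, case 2 under V
+      // below), so for now it stays null.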
+
+      // If there's exactly one branch-after and no other threads,
+      // we can route it without a switch.
+      // Skip for SEH, since ExitSwitch is used to generate code to indicate
+      // abnormal termination. (SEH: Except _leave and fall-through at
+      // the end, all other exits in a _try (return/goto/continue/break)
+      // are considered as abnormal terminations, using NormalCleanupDestSlot
+      // to indicate abnormal termination)
+      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+          !currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) {
+        llvm_unreachable("NYI");
+        // Build a switch-out if we need it:
+        //   - if there are branch-afters threaded through the scope
+        //   - if fall-through is a branch-after
+        //   - if there are fixups that have nowhere left to go and
+        //     so must be immediately resolved
+      } else if (Scope.getNumBranchAfters() ||
+                 (HasFallthrough && !FallthroughIsBranchThrough) ||
+                 (HasFixups && !HasEnclosingCleanups)) {
+        assert(!cir::MissingFeatures::cleanupBranchAfterSwitch());
+      } else {
+        // We should always have a branch-through destination in this case.
+        assert(branchThroughDest);
+        assert(!cir::MissingFeatures::cleanupAlwaysBranchThrough());
+      }
+
+      // IV. Pop the cleanup and emit it.
+      Scope.markEmitted();
+      EHStack.popCleanup();
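+      // Popping before emission keeps the cleanup from sitting on the stack
+      // (and thus protecting itself) while its own body is generated.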
+      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+      emitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+      // Append the prepared cleanup prologue from above.
+      assert(!cir::MissingFeatures::cleanupAppendInsts());
+
+      // Optimistically hope that any fixups will continue falling through.
+      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); I < E;
+           ++I) {
+        llvm_unreachable("NYI");
+      }
+
+      // V. Set up the fallthrough edge out.
+
+      // Case 1: a fallthrough source exists but doesn't branch to the
+      // cleanup because the cleanup is inactive.
+      if (!HasFallthrough && FallthroughSource) {
+        // Prebranched fallthrough was forwarded earlier.
+        // Non-prebranched fallthrough doesn't need to be forwarded.
+        // Either way, all we need to do is restore the IP we cleared before.
+        assert(!IsActive);
+        llvm_unreachable("NYI");
+
+        // Case 2: a fallthrough source exists and should branch to the
+        // cleanup, but we're not supposed to branch through to the next
+        // cleanup.
+      } else if (HasFallthrough && fallthroughDest) {
+        llvm_unreachable("NYI");
+
+        // Case 3: a fallthrough source exists and should branch to the
+        // cleanup and then through to the next.
+      } else if (HasFallthrough) {
+        // Everything is already set up for this.
+
+        // Case 4: no fallthrough source exists.
+      } else {
+        // FIXME(cir): should we clear insertion point here?
+      }
+
+      // VI. Assorted cleaning.
+
+      // Check whether we can merge NormalEntry into a single predecessor.
+      // This might invalidate (non-IR) pointers to NormalEntry.
+      //
+      // If it did invalidate those pointers, and NormalEntry was the same
+      // as NormalExit, go back and patch up the fixups.
+      assert(!cir::MissingFeatures::simplifyCleanupEntry());
    }
  }
