[CIR][CIRGen][NFCI] Take a step into getting scope information to match OG

We are missing cleanups all around; this is more incremental progress towards
fixing that. The change is intended to be NFC, but we have to start changing
some bits in order to properly match the cleanup handling in OG.

Start tagging places with more MissingFeatures to allow us to incrementally
improve the situation.
bcardosolopes committed Dec 11, 2024
1 parent 8c82ee2 commit 9a63c50
Showing 8 changed files with 321 additions and 69 deletions.
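For context on the "cleanup bits" the message refers to: a jump that leaves a scope must first run the destructors of everything constructed in that scope, so codegen cannot branch straight to the destination; it has to thread the branch through one or more cleanup blocks. A small hypothetical C++ input (illustration only, not part of this commit):

// Hypothetical input, for illustration: lowering the early `return`
// must run ~Guard() for both `inner` and `outer` before control can
// reach the function's shared return block.
struct Guard {
  Guard() {}
  ~Guard() {} // user-provided dtor => a "normal cleanup" scope
};

int example(bool fail) {
  Guard outer;
  {
    Guard inner;
    if (fail)
      return -1; // threads through inner's and outer's cleanups
  }
  return 0;
}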
14 changes: 11 additions & 3 deletions clang/include/clang/CIR/MissingFeatures.h
@@ -59,7 +59,6 @@ struct MissingFeatures {
static bool emitTypeCheck() { return false; }
static bool tbaa() { return false; }
static bool tbaa_struct() { return false; }
static bool cleanups() { return false; }
static bool emitNullabilityCheck() { return false; }
static bool ptrAuth() { return false; }

@@ -160,12 +159,22 @@ struct MissingFeatures {
static bool fastMathFlags() { return false; }
static bool fastMathFuncAttributes() { return false; }

// Cleanup
static bool cleanups() { return false; }
static bool simplifyCleanupEntry() { return false; }
static bool requiresCleanups() { return false; }
static bool cleanupBranchAfterSwitch() { return false; }
static bool cleanupAlwaysBranchThrough() { return false; }
static bool cleanupDestinationIndex() { return false; }
static bool cleanupDestroyNRVOVariable() { return false; }
static bool cleanupAppendInsts() { return false; }
static bool cleanupIndexAndBIAdjustment() { return false; }

// Exception handling
static bool isSEHTryScope() { return false; }
static bool ehStack() { return false; }
static bool emitStartEHSpec() { return false; }
static bool emitEndEHSpec() { return false; }
static bool simplifyCleanupEntry() { return false; }

// Type qualifiers.
static bool atomicTypes() { return false; }
@@ -208,7 +217,6 @@ struct MissingFeatures {
static bool addAutoInitAnnotation() { return false; }
static bool addHeapAllocSiteMetadata() { return false; }
static bool loopInfoStack() { return false; }
static bool requiresCleanups() { return false; }
static bool constantFoldsToSimpleInteger() { return false; }
static bool checkFunctionCallABI() { return false; }
static bool zeroInitializer() { return false; }
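All of these flags return false for now; the idiom used throughout the diffs below is to assert the negation at the exact point where the feature would be needed, so each gap is documented in place and trips an assert if a flag is ever flipped on before the code path exists. A minimal sketch of the pattern (simplified stand-in, not the real header):

#include <cassert>

// Simplified stand-in for clang/include/clang/CIR/MissingFeatures.h.
struct MissingFeatures {
  static bool cleanupBranchAfterSwitch() { return false; }
};

void popCleanupBlock(/* ... */) {
  // The branch-after switch-out case is not implemented yet; this is
  // a no-op today but marks the spot and guards against silent misuse.
  assert(!MissingFeatures::cleanupBranchAfterSwitch());
}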
174 changes: 168 additions & 6 deletions clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -37,13 +37,12 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc,
JumpDest Dest) {
// Remove this once we go for making sure unreachable code is
// well modeled (or not).
assert(builder.getInsertionBlock() && "not yet implemented");
assert(!cir::MissingFeatures::ehStack());

// Insert a branch: to the cleanup block (unsolved) or to the already
// materialized label. Keep track of unsolved goto's.
auto brOp = builder.create<BrOp>(
Loc, Dest.isValid() ? Dest.getBlock() : ReturnBlock().getBlock());
assert(Dest.getBlock() && "assumes incoming valid dest");
auto brOp = builder.create<BrOp>(Loc, Dest.getBlock());

// Calculate the innermost active normal cleanup.
EHScopeStack::stable_iterator TopCleanup =
@@ -70,7 +69,33 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc,
return brOp;
}

// FIXME(cir): otherwise, thread through all the normal cleanups in scope.
// Otherwise, thread through all the normal cleanups in scope.
auto index = builder.getUInt32(Dest.getDestIndex(), Loc);
assert(!cir::MissingFeatures::cleanupIndexAndBIAdjustment());

// Add this destination to all the scopes involved.
EHScopeStack::stable_iterator I = TopCleanup;
EHScopeStack::stable_iterator E = Dest.getScopeDepth();
if (E.strictlyEncloses(I)) {
while (true) {
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
assert(Scope.isNormalCleanup());
I = Scope.getEnclosingNormalCleanup();

// If this is the last cleanup we're propagating through, tell it
// that there's a resolved jump moving through it.
if (!E.strictlyEncloses(I)) {
Scope.addBranchAfter(index, Dest.getBlock());
break;
}

// Otherwise, tell the scope that there's a jump propagating
// through it. If this isn't new information, all the rest of
// the work has been done before.
if (!Scope.addBranchThrough(Dest.getBlock()))
break;
}
}
return brOp;
}
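
To make the threading loop above concrete: each scope the jump merely passes through records a branch-through, and the last cleanup before the destination records a branch-after keyed by the destination index. A toy model (assumed behavior, not CIR code) applied to the hypothetical example() shown earlier:

#include <cstdio>
#include <vector>

int main() {
  // Cleanup scopes from innermost to outermost for example()'s early
  // return: every scope but the last gets a branch-through; the last
  // one before the destination gets the branch-after.
  std::vector<const char *> cleanups = {"~inner (Guard)", "~outer (Guard)"};
  for (size_t i = 0; i < cleanups.size(); ++i) {
    bool last = (i + 1 == cleanups.size());
    std::printf("%s -> %s\n", cleanups[i],
                last ? "addBranchAfter(index, dest)"
                     : "addBranchThrough(dest)");
  }
  return 0;
}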

@@ -305,6 +330,18 @@ static void emitCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn,
// No need to emit continuation block because CIR uses a cir.if.
}

static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
EHCleanupScope &scope) {
assert(scope.isNormalCleanup());
mlir::Block *entry = scope.getNormalBlock();
if (!entry) {
mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
entry = cgf.currLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
scope.setNormalBlock(entry);
}
return entry;
}

/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
@@ -341,7 +378,8 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {

// - whether there's a fallthrough
auto *FallthroughSource = builder.getInsertionBlock();
bool HasFallthrough = (FallthroughSource != nullptr && IsActive);
bool HasFallthrough =
(FallthroughSource != nullptr && (IsActive || HasExistingBranches));

// Branch-through fall-throughs leave the insertion point set to the
// end of the last cleanup, which points to the current scope. The
@@ -442,7 +480,131 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Otherwise, the best approach is to thread everything through
// the cleanup block and then try to clean up after ourselves.
} else {
llvm_unreachable("NYI");
// Force the entry block to exist.
mlir::Block *normalEntry = createNormalEntry(*this, Scope);

// I. Set up the fallthrough edge in.
mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;

// If there's a fallthrough, we need to store the cleanup
// destination index. For fall-throughs this is always zero.
if (HasFallthrough) {
if (!HasPrebranchedFallthrough) {
assert(!cir::MissingFeatures::cleanupDestinationIndex());
}

// Otherwise, save and clear the IP if we don't have fallthrough
// because the cleanup is inactive.
} else if (FallthroughSource) {
assert(!IsActive && "source without fallthrough for active cleanup");
savedInactiveFallthroughIP = getBuilder().saveInsertionPoint();
}

// II. Emit the entry block. This implicitly branches to it if
// we have fallthrough. All the fixups and existing branches
// should already be branched to it.
builder.setInsertionPointToEnd(normalEntry);

// intercept normal cleanup to mark SEH scope end
if (IsEHa) {
llvm_unreachable("NYI");
}

// III. Figure out where we're going and build the cleanup
// epilogue.
bool HasEnclosingCleanups =
(Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

// Compute the branch-through dest if we need it:
// - if there are branch-throughs threaded through the scope
// - if fall-through is a branch-through
// - if there are fixups that will be optimistically forwarded
// to the enclosing cleanup
mlir::Block *branchThroughDest = nullptr;
if (Scope.hasBranchThroughs() ||
(FallthroughSource && FallthroughIsBranchThrough) ||
(HasFixups && HasEnclosingCleanups)) {
llvm_unreachable("NYI");
}

mlir::Block *fallthroughDest = nullptr;

// If there's exactly one branch-after and no other threads,
// we can route it without a switch.
// Skip for SEH, since ExitSwitch is used to generate code to indicate
// abnormal termination. (SEH: Except _leave and fall-through at
// the end, all other exits in a _try (return/goto/continue/break)
// are considered as abnormal terminations, using NormalCleanupDestSlot
// to indicate abnormal termination)
if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
!currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) {
llvm_unreachable("NYI");
// Build a switch-out if we need it:
// - if there are branch-afters threaded through the scope
// - if fall-through is a branch-after
// - if there are fixups that have nowhere left to go and
// so must be immediately resolved
} else if (Scope.getNumBranchAfters() ||
(HasFallthrough && !FallthroughIsBranchThrough) ||
(HasFixups && !HasEnclosingCleanups)) {
assert(!cir::MissingFeatures::cleanupBranchAfterSwitch());
} else {
// We should always have a branch-through destination in this case.
assert(branchThroughDest);
assert(!cir::MissingFeatures::cleanupAlwaysBranchThrough());
}

// IV. Pop the cleanup and emit it.
Scope.markEmitted();
EHStack.popCleanup();
assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

emitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

// Append the prepared cleanup prologue from above.
assert(!cir::MissingFeatures::cleanupAppendInsts());

// Optimistically hope that any fixups will continue falling through.
for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); I < E;
++I) {
llvm_unreachable("NYI");
}

// V. Set up the fallthrough edge out.

// Case 1: a fallthrough source exists but doesn't branch to the
// cleanup because the cleanup is inactive.
if (!HasFallthrough && FallthroughSource) {
// Prebranched fallthrough was forwarded earlier.
// Non-prebranched fallthrough doesn't need to be forwarded.
// Either way, all we need to do is restore the IP we cleared before.
assert(!IsActive);
llvm_unreachable("NYI");

// Case 2: a fallthrough source exists and should branch to the
// cleanup, but we're not supposed to branch through to the next
// cleanup.
} else if (HasFallthrough && fallthroughDest) {
llvm_unreachable("NYI");

// Case 3: a fallthrough source exists and should branch to the
// cleanup and then through to the next.
} else if (HasFallthrough) {
// Everything is already set up for this.

// Case 4: no fallthrough source exists.
} else {
// FIXME(cir): should we clear insertion point here?
}

// VI. Assorted cleaning.

// Check whether we can merge NormalEntry into a single predecessor.
// This might invalidate (non-IR) pointers to NormalEntry.
//
// If it did invalidate those pointers, and NormalEntry was the same
// as NormalExit, go back and patch up the fixups.
assert(!cir::MissingFeatures::simplifyCleanupEntry());
}
}

2 changes: 1 addition & 1 deletion clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -916,7 +916,7 @@ template <class Derived> struct DestroyNRVOVariable : EHScopeStack::Cleanup {
QualType Ty;

void Emit(CIRGenFunction &CGF, Flags flags) override {
llvm_unreachable("NYI");
assert(!cir::MissingFeatures::cleanupDestroyNRVOVariable());
}

virtual ~DestroyNRVOVariable() = default;
72 changes: 57 additions & 15 deletions clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -357,15 +357,23 @@ void CIRGenFunction::LexicalScope::cleanup() {

// Cleanup are done right before codegen resume a scope. This is where
// objects are destroyed.
unsigned curLoc = 0;
SmallVector<mlir::Block *> retBlocks;
for (auto *retBlock : localScope->getRetBlocks()) {
mlir::OpBuilder::InsertionGuard guard(builder);
builder.setInsertionPointToEnd(retBlock);
mlir::Location retLoc = *localScope->getRetLocs()[curLoc];
curLoc++;
retBlocks.push_back(retBlock);
mlir::Location retLoc = localScope->getRetLoc(retBlock);
(void)emitReturn(retLoc);
}

auto removeUnusedRetBlocks = [&]() {
for (mlir::Block *retBlock : retBlocks) {
if (!retBlock->getUses().empty())
continue;
retBlock->erase();
}
};

auto insertCleanupAndLeave = [&](mlir::Block *InsPt) {
mlir::OpBuilder::InsertionGuard guard(builder);
builder.setInsertionPointToEnd(InsPt);
@@ -381,9 +389,34 @@
if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
cleanupBlock = localScope->getCleanupBlock(builder);
builder.create<BrOp>(InsPt->back().getLoc(), cleanupBlock);
if (!cleanupBlock->mightHaveTerminator()) {
mlir::OpBuilder::InsertionGuard guard(builder);
builder.setInsertionPointToEnd(cleanupBlock);
builder.create<YieldOp>(localScope->EndLoc);
}
}

if (localScope->Depth == 0) {
// TODO(cir): get rid of all this special cases once cleanups are properly
// implemented.
// TODO(cir): most of this code should move into emitBranchThroughCleanup
if (localScope->getRetBlocks().size() == 1) {
mlir::Block *retBlock = localScope->getRetBlocks()[0];
mlir::Location loc = localScope->getRetLoc(retBlock);
if (retBlock->getUses().empty())
retBlock->erase();
else {
// Thread return block via cleanup block.
if (cleanupBlock) {
for (auto &blockUse : retBlock->getUses()) {
auto brOp = dyn_cast<cir::BrOp>(blockUse.getOwner());
brOp.setSuccessor(cleanupBlock);
}
}
builder.create<BrOp>(loc, retBlock);
return;
}
}
emitImplicitReturn();
return;
}
@@ -428,6 +461,7 @@ void CIRGenFunction::LexicalScope::cleanup() {
// get into this condition and emit the proper cleanup. This is
// needed to get nrvo to interop with dtor logic.
PerformCleanup = false;
removeUnusedRetBlocks();
return;
}

@@ -537,7 +571,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) {
// the ret after it's been at EndLoc.
if (auto *DI = getDebugInfo())
assert(!cir::MissingFeatures::generateDebugInfo() && "NYI");
builder.clearInsertionPoint();
// FIXME(cir): should we clearInsertionPoint? breaks many testcases
PopCleanupBlocks(PrologueCleanupDepth);
}

@@ -686,7 +720,7 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn,
assert(Fn.isDeclaration() && "Function already has body?");
mlir::Block *EntryBB = Fn.addEntryBlock();
builder.setInsertionPointToStart(EntryBB);

mlir::Block *maybeEmptyLastBlock = nullptr;
{
// Initialize lexical scope information.
LexicalScope lexScope{*this, fusedLoc, EntryBB};
Expand Down Expand Up @@ -736,18 +770,22 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn,
llvm_unreachable("no definition for emitted function");

assert(builder.getInsertionBlock() && "Should be valid");
}
maybeEmptyLastBlock = builder.getInsertionBlock();

if (mlir::failed(Fn.verifyBody()))
return nullptr;
if (mlir::failed(Fn.verifyBody()))
return nullptr;

// Emit the standard function epilogue.
finishFunction(BodyRange.getEnd());
// Emit the standard function epilogue.
finishFunction(BodyRange.getEnd());

// If we haven't marked the function nothrow through other means, do a quick
// pass now to see if we can.
assert(!cir::MissingFeatures::tryMarkNoThrow());
// If we haven't marked the function nothrow through other means, do a quick
// pass now to see if we can.
assert(!cir::MissingFeatures::tryMarkNoThrow());
}

if (maybeEmptyLastBlock && maybeEmptyLastBlock->getUses().empty() &&
maybeEmptyLastBlock->empty())
maybeEmptyLastBlock->erase();
return Fn;
}

@@ -1171,9 +1209,13 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (FD && FD->isMain() && cir::MissingFeatures::zerocallusedregs())
llvm_unreachable("NYI");

mlir::Block *EntryBB = &Fn.getBlocks().front();
// CIRGen has its own logic for entry blocks, usually per operation region.
mlir::Block *retBlock = currLexScope->getOrCreateRetBlock(*this, getLoc(Loc));
// returnBlock handles per region getJumpDestInCurrentScope LLVM traditional
// codegen logic.
(void)returnBlock(retBlock);

// TODO: allocapt insertion? probably don't need for CIR
mlir::Block *EntryBB = &Fn.getBlocks().front();

if (cir::MissingFeatures::requiresReturnValueCheck())
llvm_unreachable("NYI");
(Diffs for the remaining four changed files are not shown here.)
