diff --git a/.circleci/config.yml b/.circleci/config.yml
index 68f0ab765c..6eeefd1ed9 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -116,7 +116,6 @@ jobs:
- run:
name: Setup Go language
command: |
- brew update
brew install go@1.16
brew link go@1.16
# Check that homebrew installed the expected go version
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index c7953cbd3a..c0e9d1dbe9 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -95,7 +95,7 @@ func stateTestCmd(ctx *cli.Context) error {
for _, st := range test.Subtests() {
// Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
- state, err := test.Run(st, cfg, false)
+ _, state, err := test.Run(st, cfg, false)
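+ // (the first return value of test.Run is not needed here)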
// print state root for evmlab tracing
if ctx.GlobalBool(MachineFlag.Name) && state != nil {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index da826001ab..08ea864fd1 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -214,9 +214,9 @@ var (
Usage: `Blockchain garbage collection mode ("full", "archive")`,
Value: "full",
}
- SnapshotFlag = cli.BoolFlag{
+ SnapshotFlag = cli.BoolTFlag{
Name: "snapshot",
- Usage: `Enables snapshot-database mode -- experimental work in progress feature`,
+ Usage: `Enables snapshot-database mode (default = enable)`,
}
LightKDFFlag = cli.BoolFlag{
Name: "lightkdf",
@@ -1693,7 +1693,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheSnapshotFlag.Name) {
cfg.SnapshotCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheSnapshotFlag.Name) / 100
}
- if !ctx.GlobalIsSet(SnapshotFlag.Name) {
+ if !ctx.GlobalBool(SnapshotFlag.Name) {
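+ // --snapshot is a BoolTFlag (true by default), so this branch only runs when the
+ // user explicitly passes --snapshot=false. In that case, hand the snapshot cache
+ // allowance back to the trie clean cache before disabling snapshots.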
+ cfg.TrieCleanCache += cfg.SnapshotCache
cfg.SnapshotCache = 0 // Disabled
}
if ctx.GlobalIsSet(DocRootFlag.Name) {
@@ -1944,7 +1945,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
SnapshotLimit: eth.DefaultConfig.SnapshotCache,
}
- if !ctx.GlobalIsSet(SnapshotFlag.Name) {
+ if !ctx.GlobalBool(SnapshotFlag.Name) {
cache.SnapshotLimit = 0 // Disabled
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
diff --git a/core/blockchain.go b/core/blockchain.go
index d1a0228943..1f95914d34 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -196,9 +196,10 @@ type BlockChain struct {
processor Processor // Block transaction processor interface
vmConfig vm.Config
- badBlocks *lru.Cache // Bad block cache
- shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
- terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+ badBlocks *lru.Cache // Bad block cache
+ shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
+ terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+ writeLegacyJournal bool // Testing flag used to flush the snapshot journal in legacy format.
}
// NewBlockChain returns a fully initialised block chain using information
@@ -262,9 +263,29 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
// Make sure the state associated with the block is available
head := bc.CurrentBlock()
if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
- log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
- if err := bc.SetHead(head.NumberU64()); err != nil {
- return nil, err
+ // Head state is missing. Before attempting state recovery, find the
+ // disk layer point of the snapshot (if it is enabled) and make sure
+ // the rewound point is below that disk layer.
+ var diskRoot common.Hash
+ if bc.cacheConfig.SnapshotLimit > 0 {
+ diskRoot = rawdb.ReadSnapshotRoot(bc.db)
+ }
+ if diskRoot != (common.Hash{}) {
+ log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
+
+ snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot)
+ if err != nil {
+ return nil, err
+ }
+ // Chain rewound, persist old snapshot number to indicate recovery procedure
+ if snapDisk != 0 {
+ rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
+ }
+ } else {
+ log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
+ if err := bc.SetHead(head.NumberU64()); err != nil {
+ return nil, err
+ }
}
}
// Ensure that a previous crash in SetHead doesn't leave extra ancients
@@ -320,7 +341,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
// Load any existing snapshot, regenerating it if loading failed
if bc.cacheConfig.SnapshotLimit > 0 {
- bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait)
+ // If the chain was rewound past the snapshot persistent layer (causing
+ // a recovery block number to be persisted to disk), check if we're still
+ // in recovery mode and in that case, don't invalidate the snapshot on a
+ // head mismatch.
+ var recover bool
+
+ head := bc.CurrentBlock()
+ if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
+ log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
+ recover = true
+ }
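+ // Pass the recovery flag through so a head/journal mismatch does not force the snapshot to be discarded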
+ bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, recover)
}
// Take ownership of this particular state
go bc.update()
@@ -416,9 +448,25 @@ func (bc *BlockChain) loadLastState() error {
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
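+ // Delegating with an empty root disables the extra "beyond root" rewind condition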
+ _, err := bc.SetHeadBeyondRoot(head, common.Hash{})
+ return err
+}
+
+// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
+// that the rewind must pass the specified state root. This method is meant to be
+// used when rewinding with snapshots enabled to ensure that we go back further than
+// the persistent disk layer. Depending on whether the node was fast synced or full, and
+// in which state, the method will try to delete minimal data from disk whilst
+// retaining chain consistency.
+//
+// The method returns the block number where the requested root cap was found.
+func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
+ // Track the block number of the requested root hash
+ var rootNumber uint64 // (no root == always 0)
+
// Retrieve the last pivot block to short circuit rollbacks beyond it and the
// current freezer limit to start nuking if underflown
pivot := rawdb.ReadLastPivotNumber(bc.db)
@@ -434,8 +482,16 @@ func (bc *BlockChain) SetHead(head uint64) error {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
} else {
- // Block exists, keep rewinding until we find one with state
+ // Block exists. Keep rewinding until we find one with state, and
+ // keep rewinding further until we also pass the optional threshold
+ // root hash
+ beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
+
for {
+ // If a root threshold was requested but not yet crossed, check whether this block crosses it
+ if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
+ beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
+ }
if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
log.Info("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
if pivot == nil || newHeadBlock.NumberU64() > *pivot {
@@ -446,8 +502,12 @@ func (bc *BlockChain) SetHead(head uint64) error {
newHeadBlock = bc.genesisBlock
}
}
- log.Info("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
- break
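+ // Stop rewinding only once state is found and the requested root (if any) has been passed, or at genesis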
+ if beyondRoot || newHeadBlock.NumberU64() == 0 {
+ log.Info("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+ break
+ }
+ log.Info("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
+ newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
}
}
rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
@@ -528,7 +588,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
bc.txLookupCache.Purge()
bc.futureBlocks.Purge()
- return bc.loadLastState()
+ return rootNumber, bc.loadLastState()
}
// FastSyncCommitHead sets the current head block to the one defined by the hash
@@ -571,6 +631,11 @@ func (bc *BlockChain) Snapshot() *snapshot.Tree {
return bc.snaps
}
+// Snapshots returns the blockchain snapshot tree. It is an alias of Snapshot,
+// kept so code cherry-picked from upstream (which calls Snapshots) still compiles.
+func (bc *BlockChain) Snapshots() *snapshot.Tree {
+ return bc.Snapshot()
+}
+
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
@@ -880,8 +945,14 @@ func (bc *BlockChain) Stop() {
var snapBase common.Hash
if bc.snaps != nil {
var err error
- if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
- log.Error("Failed to journal state snapshot", "err", err)
+ if bc.writeLegacyJournal {
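+ // Testing-only path: flush the snapshot journal in the legacy format (see writeLegacyJournal)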
+ if snapBase, err = bc.snaps.LegacyJournal(bc.CurrentBlock().Root()); err != nil {
+ log.Error("Failed to journal state snapshot", "err", err)
+ }
+ } else {
+ if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
+ log.Error("Failed to journal state snapshot", "err", err)
+ }
}
}
// Ensure the state of a recent block is also stored to disk before exiting.
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index ea2fbf61d4..6524a823be 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -25,6 +25,7 @@ import (
"io/ioutil"
"os"
"testing"
+ "time"
"github.com/celo-org/celo-blockchain/common"
mockEngine "github.com/celo-org/celo-blockchain/consensus/consensustest"
@@ -38,7 +39,10 @@ import (
// committed to disk and then the process crashed. In this case we expect the full
// chain to be rolled back to the committed block, but the chain data itself left
// in the database for replaying.
-func TestShortRepair(t *testing.T) {
+func TestShortRepair(t *testing.T) { testShortRepair(t, false) }
+func TestShortRepairWithSnapshots(t *testing.T) { testShortRepair(t, true) }
+
+func testShortRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -68,14 +72,17 @@ func TestShortRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a short canonical chain where the fast sync pivot point was
// already committed, after which the process crashed. In this case we expect the full
// chain to be rolled back to the committed block, but the chain data itself left in
// the database for replaying.
-func TestShortFastSyncedRepair(t *testing.T) {
+func TestShortFastSyncedRepair(t *testing.T) { testShortFastSyncedRepair(t, false) }
+func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) }
+
+func testShortFastSyncedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -105,14 +112,17 @@ func TestShortFastSyncedRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a short canonical chain where the fast sync pivot point was
// not yet committed, but the process crashed. In this case we expect the chain to
// detect that it was fast syncing and not delete anything, since we can just pick
// up directly where we left off.
-func TestShortFastSyncingRepair(t *testing.T) {
+func TestShortFastSyncingRepair(t *testing.T) { testShortFastSyncingRepair(t, false) }
+func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) }
+
+func testShortFastSyncingRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -142,7 +152,7 @@ func TestShortFastSyncingRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a recovery for a short canonical chain and a shorter side chain, where a
@@ -150,7 +160,10 @@ func TestShortFastSyncingRepair(t *testing.T) {
// test scenario the side chain is below the committed block. In this case we expect
// the canonical chain to be rolled back to the committed block, but the chain data
// itself left in the database for replaying.
-func TestShortOldForkedRepair(t *testing.T) {
+func TestShortOldForkedRepair(t *testing.T) { testShortOldForkedRepair(t, false) }
+func TestShortOldForkedRepairWithSnapshots(t *testing.T) { testShortOldForkedRepair(t, true) }
+
+func testShortOldForkedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -182,7 +195,7 @@ func TestShortOldForkedRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -191,6 +204,13 @@ func TestShortOldForkedRepair(t *testing.T) {
// this case we expect the canonical chain to be rolled back to the committed block,
// but the chain data itself left in the database for replaying.
func TestShortOldForkedFastSyncedRepair(t *testing.T) {
+ testShortOldForkedFastSyncedRepair(t, false)
+}
+func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) {
+ testShortOldForkedFastSyncedRepair(t, true)
+}
+
+func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -222,7 +242,7 @@ func TestShortOldForkedFastSyncedRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -231,6 +251,13 @@ func TestShortOldForkedFastSyncedRepair(t *testing.T) {
// the chain to detect that it was fast syncing and not delete anything, since we
// can just pick up directly where we left off.
func TestShortOldForkedFastSyncingRepair(t *testing.T) {
+ testShortOldForkedFastSyncingRepair(t, false)
+}
+func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) {
+ testShortOldForkedFastSyncingRepair(t, true)
+}
+
+func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -262,7 +289,7 @@ func TestShortOldForkedFastSyncingRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a recovery for a short canonical chain and a shorter side chain, where a
@@ -270,7 +297,10 @@ func TestShortOldForkedFastSyncingRepair(t *testing.T) {
// test scenario the side chain reaches above the committed block. In this case we
// expect the canonical chain to be rolled back to the committed block, but the
// chain data itself left in the database for replaying.
-func TestShortNewlyForkedRepair(t *testing.T) {
+func TestShortNewlyForkedRepair(t *testing.T) { testShortNewlyForkedRepair(t, false) }
+func TestShortNewlyForkedRepairWithSnapshots(t *testing.T) { testShortNewlyForkedRepair(t, true) }
+
+func testShortNewlyForkedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6
@@ -302,7 +332,7 @@ func TestShortNewlyForkedRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -311,6 +341,13 @@ func TestShortNewlyForkedRepair(t *testing.T) {
// In this case we expect the canonical chain to be rolled back to the committed
// block, but the chain data itself left in the database for replaying.
func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
+ testShortNewlyForkedFastSyncedRepair(t, false)
+}
+func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) {
+ testShortNewlyForkedFastSyncedRepair(t, true)
+}
+
+func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6
@@ -342,7 +379,7 @@ func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a short canonical chain and a shorter side chain, where
@@ -351,6 +388,13 @@ func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
// case we expect the chain to detect that it was fast syncing and not delete
// anything, since we can just pick up directly where we left off.
func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
+ testShortNewlyForkedFastSyncingRepair(t, false)
+}
+func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) {
+ testShortNewlyForkedFastSyncingRepair(t, true)
+}
+
+func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3->S4->S5->S6
@@ -382,14 +426,17 @@ func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks where a recent
// block - newer than the ancient limit - was already committed to disk and then
// the process crashed. In this case we expect the chain to be rolled back to the
// committed block, with everything afterwards kept as fast sync data.
-func TestLongShallowRepair(t *testing.T) {
+func TestLongShallowRepair(t *testing.T) { testLongShallowRepair(t, false) }
+func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) }
+
+func testLongShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -424,14 +471,17 @@ func TestLongShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks where a recent
// block - older than the ancient limit - was already committed to disk and then
// the process crashed. In this case we expect the chain to be rolled back to the
// committed block, with everything afterwards deleted.
-func TestLongDeepRepair(t *testing.T) {
+func TestLongDeepRepair(t *testing.T) { testLongDeepRepair(t, false) }
+func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) }
+
+func testLongDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -465,7 +515,7 @@ func TestLongDeepRepair(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -473,6 +523,13 @@ func TestLongDeepRepair(t *testing.T) {
// which the process crashed. In this case we expect the chain to be rolled back
// to the committed block, with everything afterwards kept as fast sync data.
func TestLongFastSyncedShallowRepair(t *testing.T) {
+ testLongFastSyncedShallowRepair(t, false)
+}
+func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+ testLongFastSyncedShallowRepair(t, true)
+}
+
+func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -507,14 +564,17 @@ func TestLongFastSyncedShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks where the fast
// sync pivot point - older than the ancient limit - was already committed, after
// which the process crashed. In this case we expect the chain to be rolled back
// to the committed block, with everything afterwards deleted.
-func TestLongFastSyncedDeepRepair(t *testing.T) {
+func TestLongFastSyncedDeepRepair(t *testing.T) { testLongFastSyncedDeepRepair(t, false) }
+func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) }
+
+func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -548,7 +608,7 @@ func TestLongFastSyncedDeepRepair(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -557,6 +617,13 @@ func TestLongFastSyncedDeepRepair(t *testing.T) {
// syncing and not delete anything, since we can just pick up directly where we
// left off.
func TestLongFastSyncingShallowRepair(t *testing.T) {
+ testLongFastSyncingShallowRepair(t, false)
+}
+func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+ testLongFastSyncingShallowRepair(t, true)
+}
+
+func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -591,7 +658,7 @@ func TestLongFastSyncingShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks where the fast
@@ -599,7 +666,10 @@ func TestLongFastSyncingShallowRepair(t *testing.T) {
// process crashed. In this case we expect the chain to detect that it was fast
// syncing and not delete anything, since we can just pick up directly where we
// left off.
-func TestLongFastSyncingDeepRepair(t *testing.T) {
+func TestLongFastSyncingDeepRepair(t *testing.T) { testLongFastSyncingDeepRepair(t, false) }
+func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) }
+
+func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -634,7 +704,7 @@ func TestLongFastSyncingDeepRepair(t *testing.T) {
expHeadHeader: 24,
expHeadFastBlock: 24,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -644,6 +714,13 @@ func TestLongFastSyncingDeepRepair(t *testing.T) {
// rolled back to the committed block, with everything afterwards kept as fast
// sync data; the side chain completely nuked by the freezer.
func TestLongOldForkedShallowRepair(t *testing.T) {
+ testLongOldForkedShallowRepair(t, false)
+}
+func TestLongOldForkedShallowRepairWithSnapshots(t *testing.T) {
+ testLongOldForkedShallowRepair(t, true)
+}
+
+func testLongOldForkedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -679,7 +756,7 @@ func TestLongOldForkedShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -688,7 +765,10 @@ func TestLongOldForkedShallowRepair(t *testing.T) {
// chain is below the committed block. In this case we expect the canonical chain
// to be rolled back to the committed block, with everything afterwards deleted;
// the side chain completely nuked by the freezer.
-func TestLongOldForkedDeepRepair(t *testing.T) {
+func TestLongOldForkedDeepRepair(t *testing.T) { testLongOldForkedDeepRepair(t, false) }
+func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) }
+
+func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -723,7 +803,7 @@ func TestLongOldForkedDeepRepair(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -733,6 +813,13 @@ func TestLongOldForkedDeepRepair(t *testing.T) {
// to be rolled back to the committed block, with everything afterwards kept as
// fast sync data; the side chain completely nuked by the freezer.
func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+ testLongOldForkedFastSyncedShallowRepair(t, false)
+}
+func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+ testLongOldForkedFastSyncedShallowRepair(t, true)
+}
+
+func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -768,7 +855,7 @@ func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -778,6 +865,13 @@ func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
// chain to be rolled back to the committed block, with everything afterwards deleted;
// the side chain completely nuked by the freezer.
func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+ testLongOldForkedFastSyncedDeepRepair(t, false)
+}
+func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+ testLongOldForkedFastSyncedDeepRepair(t, true)
+}
+
+func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -812,7 +906,7 @@ func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -822,6 +916,13 @@ func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
+ testLongOldForkedFastSyncingShallowRepair(t, false)
+}
+func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+ testLongOldForkedFastSyncingShallowRepair(t, true)
+}
+
+func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -857,7 +958,7 @@ func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -867,6 +968,13 @@ func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
+ testLongOldForkedFastSyncingDeepRepair(t, false)
+}
+func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+ testLongOldForkedFastSyncingDeepRepair(t, true)
+}
+
+func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -902,7 +1010,7 @@ func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
expHeadHeader: 24,
expHeadFastBlock: 24,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -912,6 +1020,13 @@ func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
// rolled back to the committed block, with everything afterwards kept as fast
// sync data; the side chain completely nuked by the freezer.
func TestLongNewerForkedShallowRepair(t *testing.T) {
+ testLongNewerForkedShallowRepair(t, false)
+}
+func TestLongNewerForkedShallowRepairWithSnapshots(t *testing.T) {
+ testLongNewerForkedShallowRepair(t, true)
+}
+
+func testLongNewerForkedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -947,7 +1062,7 @@ func TestLongNewerForkedShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -956,7 +1071,10 @@ func TestLongNewerForkedShallowRepair(t *testing.T) {
// chain is above the committed block. In this case we expect the canonical chain
// to be rolled back to the committed block, with everything afterwards deleted;
// the side chain completely nuked by the freezer.
-func TestLongNewerForkedDeepRepair(t *testing.T) {
+func TestLongNewerForkedDeepRepair(t *testing.T) { testLongNewerForkedDeepRepair(t, false) }
+func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) }
+
+func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -991,7 +1109,7 @@ func TestLongNewerForkedDeepRepair(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1001,6 +1119,13 @@ func TestLongNewerForkedDeepRepair(t *testing.T) {
// to be rolled back to the committed block, with everything afterwards kept as fast
// sync data; the side chain completely nuked by the freezer.
func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+ testLongNewerForkedFastSyncedShallowRepair(t, false)
+}
+func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) {
+ testLongNewerForkedFastSyncedShallowRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1036,7 +1161,7 @@ func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1046,6 +1171,13 @@ func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
// chain to be rolled back to the committed block, with everything afterwards deleted;
// the side chain completely nuked by the freezer.
func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+ testLongNewerForkedFastSyncedDeepRepair(t, false)
+}
+func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) {
+ testLongNewerForkedFastSyncedDeepRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1080,7 +1212,7 @@ func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1090,6 +1222,13 @@ func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
+ testLongNewerForkedFastSyncingShallowRepair(t, false)
+}
+func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) {
+ testLongNewerForkedFastSyncingShallowRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1125,7 +1264,7 @@ func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
expHeadHeader: 18,
expHeadFastBlock: 18,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a recovery for a long canonical chain with frozen blocks and a shorter
@@ -1135,6 +1274,13 @@ func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
// that it was fast syncing and not delete anything. The side chain is completely
// nuked by the freezer.
func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
+ testLongNewerForkedFastSyncingDeepRepair(t, false)
+}
+func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) {
+ testLongNewerForkedFastSyncingDeepRepair(t, true)
+}
+
+func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1170,13 +1316,13 @@ func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
expHeadHeader: 24,
expHeadFastBlock: 24,
expHeadBlock: 0,
- })
+ }, snapshots)
}
-func testRepair(t *testing.T, tt *rewindTest) {
+func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
// It's hard to follow the test case, so visualize the input
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- //fmt.Println(tt.dump(true))
+ // fmt.Println(tt.dump(true))
// Create a temporary persistent database
datadir, err := ioutil.TempDir("", "")
@@ -1195,8 +1341,18 @@ func testRepair(t *testing.T, tt *rewindTest) {
var (
genesis = new(Genesis).MustCommit(db)
engine = mockEngine.NewFaker()
+ config = &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ SnapshotLimit: 0, // Disable snapshot by default
+ }
)
- chain, err := NewBlockChain(db, nil, params.IstanbulTestChainConfig, engine, vm.Config{}, nil)
+ if snapshots {
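+ // The *WithSnapshots test variants re-enable snapshots and wait for snapshot generation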
+ config.SnapshotLimit = 256
+ config.SnapshotWait = true
+ }
+ chain, err := NewBlockChain(db, config, params.IstanbulTestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -1219,6 +1375,11 @@ func testRepair(t *testing.T, tt *rewindTest) {
}
if tt.commitBlock > 0 {
chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
+ if snapshots {
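+ // Flatten the snapshot diff layers into the disk layer at the committed block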
+ if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
+ t.Fatalf("Failed to flatten snapshots: %v", err)
+ }
+ }
}
if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index e056c447b9..b4f90898ed 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -25,6 +25,7 @@ import (
"os"
"strings"
"testing"
+ "time"
"github.com/celo-org/celo-blockchain/common"
mockEngine "github.com/celo-org/celo-blockchain/consensus/consensustest"
@@ -149,7 +150,10 @@ func (tt *rewindTest) Dump(crash bool) string {
// chain to be rolled back to the committed block. Everything above the sethead
// point should be deleted. In between the committed block and the requested head
// the data can remain as "fast sync" data to avoid redownloading it.
-func TestShortSetHead(t *testing.T) {
+func TestShortSetHead(t *testing.T) { testShortSetHead(t, false) }
+func TestShortSetHeadWithSnapshots(t *testing.T) { testShortSetHead(t, true) }
+
+func testShortSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -180,7 +184,7 @@ func TestShortSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a short canonical chain where the fast sync pivot point was
@@ -189,7 +193,10 @@ func TestShortSetHead(t *testing.T) {
// Everything above the sethead point should be deleted. In between the committed
// block and the requested head the data can remain as "fast sync" data to avoid
// redownloading it.
-func TestShortFastSyncedSetHead(t *testing.T) {
+func TestShortFastSyncedSetHead(t *testing.T) { testShortFastSyncedSetHead(t, false) }
+func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) }
+
+func testShortFastSyncedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -220,7 +227,7 @@ func TestShortFastSyncedSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a short canonical chain where the fast sync pivot point was
@@ -228,7 +235,10 @@ func TestShortFastSyncedSetHead(t *testing.T) {
// detect that it was fast syncing and delete everything from the new head, since
// we can just pick up fast syncing from there. The head full block should be set
// to the genesis.
-func TestShortFastSyncingSetHead(t *testing.T) {
+func TestShortFastSyncingSetHead(t *testing.T) { testShortFastSyncingSetHead(t, false) }
+func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) }
+
+func testShortFastSyncingSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
@@ -259,7 +269,7 @@ func TestShortFastSyncingSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a sethead for a short canonical chain and a shorter side chain, where a
@@ -269,7 +279,10 @@ func TestShortFastSyncingSetHead(t *testing.T) {
// above the sethead point should be deleted. In between the committed block and
// the requested head the data can remain as "fast sync" data to avoid redownloading
// it. The side chain should be left alone as it was shorter.
-func TestShortOldForkedSetHead(t *testing.T) {
+func TestShortOldForkedSetHead(t *testing.T) { testShortOldForkedSetHead(t, false) }
+func TestShortOldForkedSetHeadWithSnapshots(t *testing.T) { testShortOldForkedSetHead(t, true) }
+
+func testShortOldForkedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -302,7 +315,7 @@ func TestShortOldForkedSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -313,6 +326,13 @@ func TestShortOldForkedSetHead(t *testing.T) {
// committed block and the requested head the data can remain as "fast sync" data
// to avoid redownloading it. The side chain should be left alone as it was shorter.
func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
+ testShortOldForkedFastSyncedSetHead(t, false)
+}
+func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
+ testShortOldForkedFastSyncedSetHead(t, true)
+}
+
+func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -345,7 +365,7 @@ func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -355,6 +375,13 @@ func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
// head, since we can just pick up fast syncing from there. The head full block
// should be set to the genesis.
func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
+ testShortOldForkedFastSyncingSetHead(t, false)
+}
+func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
+ testShortOldForkedFastSyncingSetHead(t, true)
+}
+
+func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
// └->S1->S2->S3
@@ -387,7 +414,7 @@ func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a sethead for a short canonical chain and a shorter side chain, where a
@@ -401,7 +428,10 @@ func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
// The side chain could be left alone if the fork point was before the new head
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
-func TestShortNewlyForkedSetHead(t *testing.T) {
+func TestShortNewlyForkedSetHead(t *testing.T) { testShortNewlyForkedSetHead(t, false) }
+func TestShortNewlyForkedSetHeadWithSnapshots(t *testing.T) { testShortNewlyForkedSetHead(t, true) }
+
+func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -434,7 +464,7 @@ func TestShortNewlyForkedSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -448,6 +478,13 @@ func TestShortNewlyForkedSetHead(t *testing.T) {
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
+ testShortNewlyForkedFastSyncedSetHead(t, false)
+}
+func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) {
+ testShortNewlyForkedFastSyncedSetHead(t, true)
+}
+
+func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -480,7 +517,7 @@ func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a short canonical chain and a shorter side chain, where
@@ -494,6 +531,13 @@ func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
// we are deleting to, but it would be exceedingly hard to detect that case and
// properly handle it, so we'll trade extra work in exchange for simpler code.
func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
+ testShortNewlyForkedFastSyncingSetHead(t, false)
+}
+func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) {
+ testShortNewlyForkedFastSyncingSetHead(t, true)
+}
+
+func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8
@@ -526,7 +570,7 @@ func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
expHeadHeader: 7,
expHeadFastBlock: 7,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks where a recent
@@ -535,7 +579,10 @@ func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
// to the committed block. Everything above the sethead point should be deleted.
// In between the committed block and the requested head the data can remain as
// "fast sync" data to avoid redownloading it.
-func TestLongShallowSetHead(t *testing.T) {
+func TestLongShallowSetHead(t *testing.T) { testLongShallowSetHead(t, false) }
+func TestLongShallowSetHeadWithSnapshots(t *testing.T) { testLongShallowSetHead(t, true) }
+
+func testLongShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -571,7 +618,7 @@ func TestLongShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks where a recent
@@ -579,7 +626,10 @@ func TestLongShallowSetHead(t *testing.T) {
// sethead was called. In this case we expect the full chain to be rolled back
// to the committed block. Since the ancient limit was underflown, everything
// needs to be deleted onwards to avoid creating a gap.
-func TestLongDeepSetHead(t *testing.T) {
+func TestLongDeepSetHead(t *testing.T) { testLongDeepSetHead(t, false) }
+func TestLongDeepSetHeadWithSnapshots(t *testing.T) { testLongDeepSetHead(t, true) }
+
+func testLongDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -614,7 +664,7 @@ func TestLongDeepSetHead(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -624,6 +674,13 @@ func TestLongDeepSetHead(t *testing.T) {
// deleted. In between the committed block and the requested head the data can
// remain as "fast sync" data to avoid redownloading it.
func TestLongFastSyncedShallowSetHead(t *testing.T) {
+ testLongFastSyncedShallowSetHead(t, false)
+}
+func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+ testLongFastSyncedShallowSetHead(t, true)
+}
+
+func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -659,7 +716,7 @@ func TestLongFastSyncedShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -667,7 +724,10 @@ func TestLongFastSyncedShallowSetHead(t *testing.T) {
// which sethead was called. In this case we expect the full chain to be rolled
// back to the committed block. Since the ancient limit was underflown, everything
// needs to be deleted onwards to avoid creating a gap.
-func TestLongFastSyncedDeepSetHead(t *testing.T) {
+func TestLongFastSyncedDeepSetHead(t *testing.T) { testLongFastSyncedDeepSetHead(t, false) }
+func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) }
+
+func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -702,7 +762,7 @@ func TestLongFastSyncedDeepSetHead(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -711,6 +771,13 @@ func TestLongFastSyncedDeepSetHead(t *testing.T) {
// syncing and delete everything from the new head, since we can just pick up fast
// syncing from there.
func TestLongFastSyncingShallowSetHead(t *testing.T) {
+ testLongFastSyncingShallowSetHead(t, false)
+}
+func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+ testLongFastSyncingShallowSetHead(t, true)
+}
+
+func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
//
@@ -746,7 +813,7 @@ func TestLongFastSyncingShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks where the fast
@@ -755,6 +822,13 @@ func TestLongFastSyncingShallowSetHead(t *testing.T) {
// syncing and delete everything from the new head, since we can just pick up fast
// syncing from there.
func TestLongFastSyncingDeepSetHead(t *testing.T) {
+ testLongFastSyncingDeepSetHead(t, false)
+}
+func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+ testLongFastSyncingDeepSetHead(t, true)
+}
+
+func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
//
@@ -789,7 +863,7 @@ func TestLongFastSyncingDeepSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
@@ -800,6 +874,13 @@ func TestLongFastSyncingDeepSetHead(t *testing.T) {
// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked
// by the freezer.
func TestLongOldForkedShallowSetHead(t *testing.T) {
+ testLongOldForkedShallowSetHead(t, false)
+}
+func TestLongOldForkedShallowSetHeadWithSnapshots(t *testing.T) {
+ testLongOldForkedShallowSetHead(t, true)
+}
+
+func testLongOldForkedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -836,7 +917,7 @@ func TestLongOldForkedShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
@@ -845,7 +926,10 @@ func TestLongOldForkedShallowSetHead(t *testing.T) {
// chain to be rolled back to the committed block. Since the ancient limit was
// underflown, everything needs to be deleted onwards to avoid creating a gap. The
// side chain is nuked by the freezer.
-func TestLongOldForkedDeepSetHead(t *testing.T) {
+func TestLongOldForkedDeepSetHead(t *testing.T) { testLongOldForkedDeepSetHead(t, false) }
+func TestLongOldForkedDeepSetHeadWithSnapshots(t *testing.T) { testLongOldForkedDeepSetHead(t, true) }
+
+func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -881,7 +965,7 @@ func TestLongOldForkedDeepSetHead(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -893,6 +977,13 @@ func TestLongOldForkedDeepSetHead(t *testing.T) {
// requested head the data can remain as "fast sync" data to avoid redownloading
// it. The side chain is nuked by the freezer.
func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
+ testLongOldForkedFastSyncedShallowSetHead(t, false)
+}
+func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+ testLongOldForkedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -929,7 +1020,7 @@ func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -940,6 +1031,13 @@ func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
// underflown, everything needs to be deleted onwards to avoid creating a gap. The
// side chain is nuked by the freezer.
func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
+ testLongOldForkedFastSyncedDeepSetHead(t, false)
+}
+func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+ testLongOldForkedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -975,7 +1073,7 @@ func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -986,6 +1084,13 @@ func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
// just pick up fast syncing from there. The side chain is completely nuked by the
// freezer.
func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
+ testLongOldForkedFastSyncingShallowSetHead(t, false)
+}
+func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+ testLongOldForkedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3
@@ -1022,7 +1127,7 @@ func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1033,6 +1138,13 @@ func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
// just pick up fast syncing from there. The side chain is completely nuked by the
// freezer.
func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
+ testLongOldForkedFastSyncingDeepSetHead(t, false)
+}
+func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+ testLongOldForkedFastSyncingDeepSetHead(t, true)
+}
+
+func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3
@@ -1068,7 +1180,7 @@ func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1077,6 +1189,13 @@ func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongShallowSetHead.
func TestLongNewerForkedShallowSetHead(t *testing.T) {
+ testLongNewerForkedShallowSetHead(t, false)
+}
+func TestLongNewerForkedShallowSetHeadWithSnapshots(t *testing.T) {
+ testLongNewerForkedShallowSetHead(t, true)
+}
+
+func testLongNewerForkedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1113,7 +1232,7 @@ func TestLongNewerForkedShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1122,6 +1241,13 @@ func TestLongNewerForkedShallowSetHead(t *testing.T) {
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongDeepSetHead.
func TestLongNewerForkedDeepSetHead(t *testing.T) {
+ testLongNewerForkedDeepSetHead(t, false)
+}
+func TestLongNewerForkedDeepSetHeadWithSnapshots(t *testing.T) {
+ testLongNewerForkedDeepSetHead(t, true)
+}
+
+func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1157,7 +1283,7 @@ func TestLongNewerForkedDeepSetHead(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1166,6 +1292,13 @@ func TestLongNewerForkedDeepSetHead(t *testing.T) {
// the side chain is above the committed block. In this case the freezer will delete
// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
+ testLongNewerForkedFastSyncedShallowSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+ testLongNewerForkedFastSyncedShallowSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1202,7 +1335,7 @@ func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1211,6 +1344,13 @@ func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
// the side chain is above the committed block. In this case the freezer will delete
// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+ testLongNewerForkedFastSyncedDeepSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+ testLongNewerForkedFastSyncedDeepSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1246,7 +1386,7 @@ func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1255,6 +1395,13 @@ func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongFastSyncinghallowSetHead.
func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+ testLongNewerForkedFastSyncingShallowSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) {
+ testLongNewerForkedFastSyncingShallowSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1291,7 +1438,7 @@ func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 0,
- })
+ }, snapshots)
}
// Tests a sethead for a long canonical chain with frozen blocks and a shorter
@@ -1300,6 +1447,13 @@ func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
// chain is above the committed block. In this case the freezer will delete the
// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
+ testLongNewerForkedFastSyncingDeepSetHead(t, false)
+}
+func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) {
+ testLongNewerForkedFastSyncingDeepSetHead(t, true)
+}
+
+func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
// └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1335,13 +1489,13 @@ func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
expHeadHeader: 6,
expHeadFastBlock: 6,
expHeadBlock: 0,
- })
+ }, snapshots)
}
-func testSetHead(t *testing.T, tt *rewindTest) {
+func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
// It's hard to follow the test case, visualize the input
- //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- //fmt.Println(tt.dump(false))
+ // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ // fmt.Println(tt.dump(false))
// Create a temporary persistent database
datadir, err := ioutil.TempDir("", "")
@@ -1360,8 +1514,18 @@ func testSetHead(t *testing.T, tt *rewindTest) {
var (
genesis = new(Genesis).MustCommit(db)
engine = mockEngine.NewFaker()
+ config = &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ SnapshotLimit: 0, // Disable snapshot
+ }
)
- chain, err := NewBlockChain(db, nil, params.IstanbulTestChainConfig, engine, vm.Config{}, nil)
+ if snapshots {
+ config.SnapshotLimit = 256
+ config.SnapshotWait = true
+ }
+ chain, err := NewBlockChain(db, config, params.IstanbulTestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -1384,6 +1548,11 @@ func testSetHead(t *testing.T, tt *rewindTest) {
}
if tt.commitBlock > 0 {
chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
+ if snapshots {
+ if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
+ t.Fatalf("Failed to flatten snapshots: %v", err)
+ }
+ }
}
if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
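A minimal sketch (not part of the patch) of the flattening step the snapshot-enabled sethead tests lean on. Here chain is the test BlockChain created above and committedBlock is a hypothetical block whose trie was just committed; Cap(root, 0) is expected to collapse every diff layer into the persistent disk layer, so the on-disk snapshot root ends up equal to the committed block's state root:

// Hedged sketch; chain and committedBlock are assumed from the test context.
if err := chain.Snapshots().Cap(committedBlock.Root(), 0); err != nil {
	t.Fatalf("Failed to flatten snapshots: %v", err)
}
if diskRoot := chain.Snapshots().DiskRoot(); diskRoot != committedBlock.Root() {
	t.Fatalf("Disk layer not advanced: have %x, want %x", diskRoot, committedBlock.Root())
}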
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
new file mode 100644
index 0000000000..21707ea37a
--- /dev/null
+++ b/core/blockchain_snapshot_test.go
@@ -0,0 +1,1024 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that abnormal program termination (i.e. a crash) and restart can recover
+// the snapshot properly if the snapshot is enabled.
+
+package core
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/celo-org/celo-blockchain/consensus"
+ mockEngine "github.com/celo-org/celo-blockchain/consensus/consensustest"
+ "github.com/celo-org/celo-blockchain/core/rawdb"
+ "github.com/celo-org/celo-blockchain/core/state/snapshot"
+ "github.com/celo-org/celo-blockchain/core/types"
+ "github.com/celo-org/celo-blockchain/core/vm"
+ "github.com/celo-org/celo-blockchain/ethdb"
+ "github.com/celo-org/celo-blockchain/params"
+)
+
+// snapshotTestBasic wraps the common testing fields in the snapshot tests.
+type snapshotTestBasic struct {
+ legacy bool // Whether to write the snapshot journal in legacy format
+ chainBlocks int // Number of blocks to generate for the canonical chain
+ snapshotBlock uint64 // Block number of the relevant snapshot disk layer
+ commitBlock uint64 // Block number for which to commit the state to disk
+
+ expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis)
+ expHeadHeader uint64 // Block number of the expected head header
+ expHeadFastBlock uint64 // Block number of the expected head fast sync block
+ expHeadBlock uint64 // Block number of the expected head full block
+ expSnapshotBottom uint64 // The block height corresponding to the snapshot disk layer
+
+ // shared fields, set at runtime
+ datadir string
+ db ethdb.Database
+ gendb ethdb.Database
+ engine consensus.Engine
+}
+
+func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) {
+ // Create a temporary persistent database
+ datadir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("Failed to create temporary datadir: %v", err)
+ }
+ os.RemoveAll(datadir)
+
+ db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+ if err != nil {
+ t.Fatalf("Failed to create persistent database: %v", err)
+ }
+ // Initialize a fresh chain
+ var (
+ genesis = new(Genesis).MustCommit(db)
+ engine = mockEngine.NewFaker()
+ gendb = rawdb.NewMemoryDatabase()
+
+ // Snapshots are enabled; the first snapshot is created from the genesis.
+ // The snapshot memory allowance is 256MB, which means no snapshot flush
+ // will happen during block insertion.
+ cacheConfig = defaultCacheConfig
+ )
+ chain, err := NewBlockChain(db, cacheConfig, params.IstanbulTestChainConfig, engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to create chain: %v", err)
+ }
+ blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, func(i int, b *BlockGen) {})
+
+ // Insert the blocks with configured settings.
+ var breakpoints []uint64
+ if basic.commitBlock > basic.snapshotBlock {
+ breakpoints = append(breakpoints, basic.snapshotBlock, basic.commitBlock)
+ } else {
+ breakpoints = append(breakpoints, basic.commitBlock, basic.snapshotBlock)
+ }
+ var startPoint uint64
+ for _, point := range breakpoints {
+ if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil {
+ t.Fatalf("Failed to import canonical chain start: %v", err)
+ }
+ startPoint = point
+
+ if basic.commitBlock > 0 && basic.commitBlock == point {
+ chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true)
+ }
+ if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
+ if basic.legacy {
+ // Here we commit the snapshot disk root to simulate
+ // committing the legacy snapshot.
+ rawdb.WriteSnapshotRoot(db, blocks[point-1].Root())
+ } else {
+ // Flush the entire snap tree into the disk; the
+ // relevant (a) snapshot root and (b) snapshot generator
+ // will be persisted atomically.
+ chain.snaps.Cap(blocks[point-1].Root(), 0)
+ diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
+ if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
+ t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
+ }
+ }
+ }
+ }
+ if _, err := chain.InsertChain(blocks[startPoint:]); err != nil {
+ t.Fatalf("Failed to import canonical chain tail: %v", err)
+ }
+
+ // Set runtime fields
+ basic.datadir = datadir
+ basic.db = db
+ basic.gendb = gendb
+ basic.engine = engine
+
+ // Ugly hack, notify the chain to flush the journal in legacy format
+ // if it's requested.
+ if basic.legacy {
+ chain.writeLegacyJournal = true
+ }
+ return chain, blocks
+}
+
+func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks []*types.Block) {
+ // Iterate over all the remaining blocks and ensure there are no gaps
+ verifyNoGaps(t, chain, true, blocks)
+ verifyCutoff(t, chain, true, blocks, basic.expCanonicalBlocks)
+
+ if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader {
+ t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader)
+ }
+ if head := chain.CurrentFastBlock(); head.NumberU64() != basic.expHeadFastBlock {
+ t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadFastBlock)
+ }
+ if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock {
+ t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock)
+ }
+
+ // Check the disk layer, ensure it matches the expected block
+ block := chain.GetBlockByNumber(basic.expSnapshotBottom)
+ if block == nil {
+ t.Errorf("The corresponding block[%d] of the snapshot disk layer is missing", basic.expSnapshotBottom)
+ } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
+ t.Errorf("The snapshot disk layer root is incorrect, want %x, got %x", block.Root(), chain.snaps.DiskRoot())
+ }
+
+ // Check the snapshot, ensure it's integrated
+ if err := snapshot.VerifyState(chain.snaps, block.Root()); err != nil {
+ t.Errorf("The disk layer is not integrated %v", err)
+ }
+}
+
+func (basic *snapshotTestBasic) Dump() string {
+ buffer := new(strings.Builder)
+
+ fmt.Fprint(buffer, "Chain:\n G")
+ for i := 0; i < basic.chainBlocks; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprint(buffer, " (HEAD)\n\n")
+
+ fmt.Fprintf(buffer, "Commit: G")
+ if basic.commitBlock > 0 {
+ fmt.Fprintf(buffer, ", C%d", basic.commitBlock)
+ }
+ fmt.Fprint(buffer, "\n")
+
+ fmt.Fprintf(buffer, "Snapshot: G")
+ if basic.snapshotBlock > 0 {
+ fmt.Fprintf(buffer, ", C%d", basic.snapshotBlock)
+ }
+ fmt.Fprint(buffer, "\n")
+
+ //if crash {
+ // fmt.Fprintf(buffer, "\nCRASH\n\n")
+ //} else {
+ // fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", basic.setHead)
+ //}
+ fmt.Fprintf(buffer, "------------------------------\n\n")
+
+ fmt.Fprint(buffer, "Expected in leveldb:\n G")
+ for i := 0; i < basic.expCanonicalBlocks; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n\n")
+ fmt.Fprintf(buffer, "Expected head header : C%d\n", basic.expHeadHeader)
+ fmt.Fprintf(buffer, "Expected head fast block: C%d\n", basic.expHeadFastBlock)
+ if basic.expHeadBlock == 0 {
+ fmt.Fprintf(buffer, "Expected head block : G\n")
+ } else {
+ fmt.Fprintf(buffer, "Expected head block : C%d\n", basic.expHeadBlock)
+ }
+ if basic.expSnapshotBottom == 0 {
+ fmt.Fprintf(buffer, "Expected snapshot disk : G\n")
+ } else {
+ fmt.Fprintf(buffer, "Expected snapshot disk : C%d\n", basic.expSnapshotBottom)
+ }
+ return buffer.String()
+}
+
+func (basic *snapshotTestBasic) teardown() {
+ basic.db.Close()
+ basic.gendb.Close()
+ os.RemoveAll(basic.datadir)
+}
+
+// snapshotTest is a test case type for normal snapshot recovery.
+// It can be used for testing a normal Geth restart.
+type snapshotTest struct {
+ snapshotTestBasic
+}
+
+func (snaptest *snapshotTest) test(t *testing.T) {
+ // It's hard to follow the test case, visualize the input
+ // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ // fmt.Println(tt.dump())
+ chain, blocks := snaptest.prepare(t)
+
+ // Restart the chain normally
+ chain.Stop()
+ newchain, err := NewBlockChain(snaptest.db, nil, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ defer newchain.Stop()
+
+ snaptest.verify(t, newchain, blocks)
+}
+
+// crashSnapshotTest is a test case type for abnormal snapshot recovery.
+// It can be used for testing a Geth restart after a crash.
+type crashSnapshotTest struct {
+ snapshotTestBasic
+}
+
+func (snaptest *crashSnapshotTest) test(t *testing.T) {
+ // It's hard to follow the test case, visualize the input
+ // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ // fmt.Println(tt.dump())
+ chain, blocks := snaptest.prepare(t)
+
+ // Pull the plug on the database, simulating a hard crash
+ db := chain.db
+ db.Close()
+
+ // Start a new blockchain back up and see where the repair leads us
+ newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "")
+ if err != nil {
+ t.Fatalf("Failed to reopen persistent database: %v", err)
+ }
+ defer newdb.Close()
+
+ // The interesting thing is: instead of restarting the blockchain once after
+ // the crash, we restart it twice here: once after the crash and once after
+ // a normal stop. This ensures the broken snapshot can be detected in both
+ // cases.
+ newchain, err := NewBlockChain(newdb, nil, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ newchain.Stop()
+
+ newchain, err = NewBlockChain(newdb, nil, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ defer newchain.Stop()
+
+ snaptest.verify(t, newchain, blocks)
+}
+
+// gappedSnapshotTest is a test type used to test this scenario:
+// - have a complete snapshot
+// - restart without enabling the snapshot
+// - insert a few blocks
+// - restart with the snapshot enabled again
+type gappedSnapshotTest struct {
+ snapshotTestBasic
+ gapped int // Number of blocks to insert without enabling snapshot
+}
+
+func (snaptest *gappedSnapshotTest) test(t *testing.T) {
+ // It's hard to follow the test case, visualize the input
+ // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ // fmt.Println(tt.dump())
+ chain, blocks := snaptest.prepare(t)
+
+ // Insert blocks without enabling snapshot if gapping is required.
+ chain.Stop()
+ gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, func(i int, b *BlockGen) {})
+
+ // Insert a few more blocks without enabling snapshot
+ var cacheConfig = &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ SnapshotLimit: 0,
+ }
+ newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ newchain.InsertChain(gappedBlocks)
+ newchain.Stop()
+
+ // Restart the chain with the snapshot enabled
+ newchain, err = NewBlockChain(snaptest.db, nil, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ defer newchain.Stop()
+
+ snaptest.verify(t, newchain, blocks)
+}
+
+// setHeadSnapshotTest is the test type used to test this scenario:
+// - have a complete snapshot
+// - set the head to a lower point
+// - restart
+type setHeadSnapshotTest struct {
+ snapshotTestBasic
+ setHead uint64 // Block number to set head back to
+}
+
+func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
+ // It's hard to follow the test case, visualize the input
+ // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ // fmt.Println(tt.dump())
+ chain, blocks := snaptest.prepare(t)
+
+ // Rewind the chain if setHead operation is required.
+ chain.SetHead(snaptest.setHead)
+ chain.Stop()
+
+ newchain, err := NewBlockChain(snaptest.db, nil, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ defer newchain.Stop()
+
+ snaptest.verify(t, newchain, blocks)
+}
+
+// restartCrashSnapshotTest is the test type used to test this scenario:
+// - have a complete snapshot
+// - restart chain
+// - insert more blocks with the snapshot enabled
+// - commit the snapshot
+// - crash
+// - restart again
+type restartCrashSnapshotTest struct {
+ snapshotTestBasic
+ newBlocks int
+}
+
+func (snaptest *restartCrashSnapshotTest) test(t *testing.T) {
+ // It's hard to follow the test case, visualize the input
+ // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ // fmt.Println(tt.dump())
+ chain, blocks := snaptest.prepare(t)
+
+ // Firstly, stop the chain properly, with all snapshot journal
+ // and state committed.
+ chain.Stop()
+
+ newchain, err := NewBlockChain(snaptest.db, nil, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {})
+ newchain.InsertChain(newBlocks)
+
+ // Commit the entire snapshot into the disk if requested. Note only
+ // (a) snapshot root and (b) snapshot generator will be committed,
+ // the diff journal is not.
+ newchain.Snapshots().Cap(newBlocks[len(newBlocks)-1].Root(), 0)
+
+ // Simulate the blockchain crash
+ // Don't call chain.Stop here, so that no snapshot
+ // journal and latest state will be committed
+
+ // Restart the chain after the crash
+ newchain, err = NewBlockChain(snaptest.db, nil, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ defer newchain.Stop()
+
+ snaptest.verify(t, newchain, blocks)
+}
+
+// wipeCrashSnapshotTest is the test type used to test this scenario:
+// - have a complete snapshot
+// - restart, insert more blocks without enabling the snapshot
+// - restart again with the snapshot enabled
+// - crash
+type wipeCrashSnapshotTest struct {
+ snapshotTestBasic
+ newBlocks int
+}
+
+func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
+ // It's hard to follow the test case, visualize the input
+ // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ // fmt.Println(tt.dump())
+ chain, blocks := snaptest.prepare(t)
+
+ // Firstly, stop the chain properly, with all snapshot journal
+ // and state committed.
+ chain.Stop()
+
+ config := &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ SnapshotLimit: 0,
+ }
+ newchain, err := NewBlockChain(snaptest.db, config, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {})
+ newchain.InsertChain(newBlocks)
+ newchain.Stop()
+
+ // Restart the chain; the wiper should start working
+ config = &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ SnapshotLimit: 256,
+ SnapshotWait: false, // Don't wait for the rebuild
+ }
+ _, err = NewBlockChain(snaptest.db, config, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ // Simulate the blockchain crash.
+
+ newchain, err = NewBlockChain(snaptest.db, nil, params.IstanbulTestChainConfig, snaptest.engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ snaptest.verify(t, newchain, blocks)
+}
+
+// Tests a Geth restart with a valid snapshot. Before the shutdown, the whole
+// snapshot journal will be persisted correctly. In this case no snapshot recovery
+// is required.
+func TestRestartWithNewSnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G
+ //
+ // SetHead(0)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C8
+ // Expected snapshot disk : G
+ test := &snapshotTest{
+ snapshotTestBasic{
+ legacy: false,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 8,
+ expSnapshotBottom: 0, // Initial disk layer built from genesis
+ },
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth restart with a valid but "legacy" snapshot. Before the shutdown,
+// the whole snapshot journal will be persisted correctly. In this case no snapshot
+// recovery is required.
+func TestRestartWithLegacySnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G
+ //
+ // SetHead(0)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C8
+ // Expected snapshot disk : G
+ t.Skip("Legacy format testing is not supported")
+ test := &snapshotTest{
+ snapshotTestBasic{
+ legacy: true,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 8,
+ expSnapshotBottom: 0, // Initial disk layer built from genesis
+ },
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth that crashed and restarts with a broken snapshot. In this case the
+// chain head should be rewound to the point with available state, and the new head
+// must also be lower than the disk layer. Since there is no committed point, the
+// chain should be rewound to the genesis and the disk layer should be left for
+// recovery.
+func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G, C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ // Expected snapshot disk : C4
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ legacy: false,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 0,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ expSnapshotBottom: 4, // Last committed disk layer, wait recovery
+ },
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth that crashed and restarts with a broken snapshot. In this case the
+// chain head should be rewound to the point with available state, and the new head
+// must also be lower than the disk layer. Since there is only a low committed
+// point, the chain should be rewound to that committed point and the disk layer
+// should be left for recovery.
+func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G, C2
+ // Snapshot: G, C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C2
+ // Expected snapshot disk : C4
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ legacy: false,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 2,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 2,
+ expSnapshotBottom: 4, // Last committed disk layer, wait recovery
+ },
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth that crashed and restarts with a broken snapshot. In this case
+// the chain head should be rewound to the point with available state, and the
+// new head must also be lower than the disk layer. Since there is only a high
+// committed point, the chain should be rewound to the genesis and the disk layer
+// should be left for recovery.
+func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G, C6
+ // Snapshot: G, C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ // Expected snapshot disk : C4
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ legacy: false,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 6,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ expSnapshotBottom: 4, // Last committed disk layer, wait recovery
+ },
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth that crashed and restarts with a broken, "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head. The new head here refers to the
+// genesis because there is no committed point.
+func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G, C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ // Expected snapshot disk : G
+ t.Skip("Legacy format testing is not supported")
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ legacy: true,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 0,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ expSnapshotBottom: 0, // Rebuilt snapshot from the latest HEAD(genesis)
+ },
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth that crashed and restarts with a broken, "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head. The new head here refers to
+// block-2 because its state is committed into the disk.
+func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G, C2
+ // Snapshot: G, C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C2
+ // Expected snapshot disk : C2
+ t.Skip("Legacy format testing is not supported")
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ legacy: true,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 2,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 2,
+ expSnapshotBottom: 2, // Rebuilt snapshot from the latest HEAD
+ },
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth that crashed and restarts with a broken, "legacy format"
+// snapshot. In this case the entire legacy snapshot should be discarded
+// and rebuilt from the new chain head.
+//
+// The new head here refers to the genesis, the reasons being:
+// - the state of block-6 is committed into the disk
+// - the legacy disk layer of block-4 is committed into the disk
+// - the head is rewound to the genesis in order to find an available
+// state lower than the disk layer
+func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G, C6
+ // Snapshot: G, C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ // Expected snapshot disk : G
+ t.Skip("Legacy format testing is not supported")
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ legacy: true,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 6,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ expSnapshotBottom: 0, // Rebuilt snapshot from the latest HEAD(genesis)
+ },
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth that was running with the snapshot enabled, then restarts without
+// enabling the snapshot and after that re-enables it again. In this
+// case the snapshot should be rebuilt from the latest chain head.
+func TestGappedNewSnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G
+ //
+ // SetHead(0)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+ //
+ // Expected head header : C10
+ // Expected head fast block: C10
+ // Expected head block : C10
+ // Expected snapshot disk : C10
+ test := &gappedSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ legacy: false,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 10,
+ expHeadHeader: 10,
+ expHeadFastBlock: 10,
+ expHeadBlock: 10,
+ expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD
+ },
+ gapped: 2,
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests a Geth that was running with the legacy snapshot enabled, then restarts
+// without enabling the snapshot and after that re-enables it again.
+// In this case the snapshot should be rebuilt from the latest chain head.
+func TestGappedLegacySnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G
+ //
+ // SetHead(0)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+ //
+ // Expected head header : C10
+ // Expected head fast block: C10
+ // Expected head block : C10
+ // Expected snapshot disk : C10
+ t.Skip("Legacy format testing is not supported")
+ test := &gappedSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ legacy: true,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 10,
+ expHeadHeader: 10,
+ expHeadFastBlock: 10,
+ expHeadBlock: 10,
+ expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD
+ },
+ gapped: 2,
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests the case where Geth was running with the snapshot enabled and SetHead is
+// applied. In this case the head is rewound to the target (with state available).
+// After that the chain is restarted and the original disk layer is kept.
+func TestSetHeadWithNewSnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G
+ //
+ // SetHead(4)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ // Expected snapshot disk : G
+ test := &setHeadSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ legacy: false,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 4,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ expSnapshotBottom: 0, // The initial disk layer is built from the genesis
+ },
+ setHead: 4,
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests the case where Geth was running with the snapshot (legacy format) enabled
+// and SetHead is applied. In this case the head is rewound to the target (with
+// state available). After that the chain is restarted and the original disk layer is kept.
+func TestSetHeadWithLegacySnapshot(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G
+ //
+ // SetHead(4)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ // Expected snapshot disk : G
+ t.Skip("Legacy format testing is not supported")
+ test := &setHeadSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ legacy: true,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 4,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ expSnapshotBottom: 0, // The initial disk layer is built from the genesis
+ },
+ setHead: 4,
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests the case where Geth was running with the snapshot (legacy format) enabled and
+// upgraded the disk layer journal (journal generator) to the latest format. After that,
+// Geth is restarted from a crash. In this case Geth will find the new-format disk layer
+// journal but a legacy-format diff journal (the new format is never committed),
+// and the invalid diff journal is expected to be dropped.
+func TestRecoverSnapshotFromCrashWithLegacyDiffJournal(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G
+ //
+ // SetHead(0)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+ //
+ // Expected head header : C10
+ // Expected head fast block: C10
+ // Expected head block : C8
+ // Expected snapshot disk : C10
+ t.Skip("Legacy format testing is not supported")
+ test := &restartCrashSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ legacy: true,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 10,
+ expHeadHeader: 10,
+ expHeadFastBlock: 10,
+ expHeadBlock: 8, // The persisted state in the first running
+ expSnapshotBottom: 10, // The persisted disk layer in the second running
+ },
+ newBlocks: 2,
+ }
+ test.test(t)
+ test.teardown()
+}
+
+// Tests the case where Geth was running with a complete snapshot and then imports a few
+// more new blocks on top without enabling the snapshot. A crash happens after that
+// restart. Check that everything is ok after the recovery.
+func TestRecoverSnapshotFromWipingCrash(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Commit: G
+ // Snapshot: G
+ //
+ // SetHead(0)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
+ //
+ // Expected head header : C10
+ // Expected head fast block: C10
+ // Expected head block : C8
+ // Expected snapshot disk : C10
+ test := &wipeCrashSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ legacy: false,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 0,
+ expCanonicalBlocks: 10,
+ expHeadHeader: 10,
+ expHeadFastBlock: 10,
+ expHeadBlock: 10,
+ expSnapshotBottom: 10,
+ },
+ newBlocks: 2,
+ }
+ test.test(t)
+ test.teardown()
+}
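As a usage note, the harness above is intended to be extended with further scenarios by embedding snapshotTestBasic. A hypothetical sketch, not part of the patch, of a clean-restart case where the disk layer was flattened at C4 and the state committed at C2; the expectation values follow the reasoning of the scenarios above and would need to be confirmed against a real run:

// Hypothetical additional scenario, for illustration only.
func TestRestartWithAdvancedSnapshot(t *testing.T) {
	test := &snapshotTest{
		snapshotTestBasic{
			legacy:             false,
			chainBlocks:        8,
			snapshotBlock:      4, // disk layer flattened at C4 during the run
			commitBlock:        2, // trie committed at C2 during the run
			expCanonicalBlocks: 8,
			expHeadHeader:      8,
			expHeadFastBlock:   8,
			expHeadBlock:       8, // a clean Stop() journals everything, nothing is rewound
			expSnapshotBottom:  4, // the flattened disk layer survives a clean restart
		},
	}
	test.test(t)
	test.teardown()
}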
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index fc1fcb3de8..33d8cca8a1 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -1215,9 +1215,13 @@ func TestReorgSideEvent(t *testing.T) {
// first two block of the secondary chain are for a brief moment considered
// side chains because up to that point the first one is considered the
// heavier chain.
+ // The third may or may not be, depending on whether it triggers a reorg (the
+ // difficulties of the two chains are equal at this time).
+ // The boolean value indicates whether we are still waiting for that block's event.
expectedSideHashes := map[common.Hash]bool{
replacementBlocks[0].Hash(): true,
replacementBlocks[1].Hash(): true,
+ replacementBlocks[2].Hash(): false, // may not be sent (if reorg was on the 3rd block)
chain[0].Hash(): true,
chain[1].Hash(): true,
chain[2].Hash(): true,
@@ -1235,9 +1239,16 @@ done:
if _, ok := expectedSideHashes[block.Hash()]; !ok {
t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
}
+ expectedSideHashes[block.Hash()] = false
i++
- if i == len(expectedSideHashes) {
+ numLeft := 0
+ for _, isLeft := range expectedSideHashes {
+ if isLeft {
+ numLeft += 1
+ }
+ }
+ if numLeft == 0 {
timeout.Stop()
break done
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index e5aefbe17d..eef8acc018 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -17,6 +17,8 @@
package rawdb
import (
+ "encoding/binary"
+
"github.com/celo-org/celo-blockchain/common"
"github.com/celo-org/celo-blockchain/ethdb"
"github.com/celo-org/celo-blockchain/log"
@@ -118,3 +120,58 @@ func DeleteSnapshotJournal(db ethdb.KeyValueWriter) {
log.Crit("Failed to remove snapshot journal", "err", err)
}
}
+
+// ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
+// the last shutdown.
+func ReadSnapshotGenerator(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(snapshotGeneratorKey)
+ return data
+}
+
+// WriteSnapshotGenerator stores the serialized snapshot generator to save at
+// shutdown.
+func WriteSnapshotGenerator(db ethdb.KeyValueWriter, generator []byte) {
+ if err := db.Put(snapshotGeneratorKey, generator); err != nil {
+ log.Crit("Failed to store snapshot generator", "err", err)
+ }
+}
+
+// DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
+// the last shutdown.
+func DeleteSnapshotGenerator(db ethdb.KeyValueWriter) {
+ if err := db.Delete(snapshotGeneratorKey); err != nil {
+ log.Crit("Failed to remove snapshot generator", "err", err)
+ }
+}
+
+// ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
+// snapshot layer.
+func ReadSnapshotRecoveryNumber(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(snapshotRecoveryKey)
+ if len(data) == 0 {
+ return nil
+ }
+ if len(data) != 8 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteSnapshotRecoveryNumber stores the block number of the last persisted
+// snapshot layer.
+func WriteSnapshotRecoveryNumber(db ethdb.KeyValueWriter, number uint64) {
+ var buf [8]byte
+ binary.BigEndian.PutUint64(buf[:], number)
+ if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
+ log.Crit("Failed to store snapshot recovery number", "err", err)
+ }
+}
+
+// DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
+// snapshot layer.
+func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) {
+ if err := db.Delete(snapshotRecoveryKey); err != nil {
+ log.Crit("Failed to remove snapshot recovery number", "err", err)
+ }
+}
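A minimal sketch (not part of the patch) of how the recovery-number accessors round-trip; the value is stored as a fixed 8-byte big-endian blob, and anything else reads back as nil:

package main

import (
	"fmt"

	"github.com/celo-org/celo-blockchain/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// No marker yet: nil means no snapshot recovery is in progress.
	fmt.Println(rawdb.ReadSnapshotRecoveryNumber(db)) // <nil>

	// Persist the number of the last flattened disk layer and read it back.
	rawdb.WriteSnapshotRecoveryNumber(db, 1337)
	if n := rawdb.ReadSnapshotRecoveryNumber(db); n == nil || *n != 1337 {
		panic("recovery number round-trip failed")
	}
	// Deleting the marker restores the "not recovering" state.
	rawdb.DeleteSnapshotRecoveryNumber(db)
	fmt.Println(rawdb.ReadSnapshotRecoveryNumber(db)) // <nil>
}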
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 79b305e336..22b5467667 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -50,6 +50,12 @@ var (
// snapshotJournalKey tracks the in-memory diff layers across restarts.
snapshotJournalKey = []byte("SnapshotJournal")
+ // snapshotGeneratorKey tracks the snapshot generation marker across restarts.
+ snapshotGeneratorKey = []byte("SnapshotGenerator")
+
+ // snapshotRecoveryKey tracks the snapshot recovery marker across restarts.
+ snapshotRecoveryKey = []byte("SnapshotRecovery")
+
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go
index d24426bbf3..02a8ba5599 100644
--- a/core/state/snapshot/account.go
+++ b/core/state/snapshot/account.go
@@ -24,8 +24,10 @@ import (
"github.com/celo-org/celo-blockchain/rlp"
)
-// Account is a slim version of a state.Account, where the root and code hash
-// are replaced with a nil byte slice for empty accounts.
+// Account is a modified version of a state.Account, where the root is replaced
+// with a byte slice. This format can represent either the full consensus format
+// or the slim snapshot format, which replaces an empty root and code hash with a
+// nil byte slice.
type Account struct {
Nonce uint64
Balance *big.Int
@@ -33,9 +35,8 @@ type Account struct {
CodeHash []byte
}
-// AccountRLP converts a state.Account content into a slim snapshot version RLP
-// encoded.
-func AccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte {
+// SlimAccount converts a state.Account content into a slim snapshot account
+func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account {
slim := Account{
Nonce: nonce,
Balance: balance,
@@ -46,9 +47,40 @@ func AccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byt
if !bytes.Equal(codehash, emptyCode[:]) {
slim.CodeHash = codehash
}
- data, err := rlp.EncodeToBytes(slim)
+ return slim
+}
+
+// SlimAccountRLP converts a state.Account content into a slim snapshot
+// version RLP encoded.
+func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte {
+ data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash))
if err != nil {
panic(err)
}
return data
}
+
+// FullAccount decodes the data on the 'slim RLP' format and returns
+// the consensus format account.
+func FullAccount(data []byte) (Account, error) {
+ var account Account
+ if err := rlp.DecodeBytes(data, &account); err != nil {
+ return Account{}, err
+ }
+ if len(account.Root) == 0 {
+ account.Root = emptyRoot[:]
+ }
+ if len(account.CodeHash) == 0 {
+ account.CodeHash = emptyCode[:]
+ }
+ return account, nil
+}
+
+// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
+func FullAccountRLP(data []byte) ([]byte, error) {
+ account, err := FullAccount(data)
+ if err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(account)
+}
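To illustrate the two representations, a small hedged sketch (not part of the patch) that round-trips an account through the slim encoding; the field values are arbitrary, non-empty placeholders:

package main

import (
	"bytes"
	"fmt"
	"math/big"

	"github.com/celo-org/celo-blockchain/common"
	"github.com/celo-org/celo-blockchain/core/state/snapshot"
)

func main() {
	// Arbitrary non-empty account fields, chosen for illustration only.
	var (
		nonce    = uint64(7)
		balance  = big.NewInt(42)
		root     = common.HexToHash("0x11")
		codehash = common.HexToHash("0x22").Bytes()
	)
	// Encode in the slim snapshot format (an empty root or code hash would be dropped)...
	slim := snapshot.SlimAccountRLP(nonce, balance, root, codehash)

	// ...and expand back into the full consensus representation.
	full, err := snapshot.FullAccount(slim)
	if err != nil {
		panic(err)
	}
	fmt.Println("nonce preserved:   ", full.Nonce == nonce)
	fmt.Println("root preserved:    ", bytes.Equal(full.Root, root.Bytes()))
	fmt.Println("codehash preserved:", bytes.Equal(full.CodeHash, codehash))
}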
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index 11fd46b847..b464c28dca 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -17,6 +17,8 @@
package snapshot
import (
+ "bytes"
+ "fmt"
"sync"
"time"
@@ -27,80 +29,239 @@ import (
"github.com/celo-org/celo-blockchain/trie"
)
-// conversionAccount is used for converting between full and slim format. When
-// doing this, we can consider 'balance' as a byte array, as it has already
-// been converted from big.Int into an rlp-byteslice.
-type conversionAccount struct {
- Nonce uint64
- Balance []byte
- Root []byte
- CodeHash []byte
+// trieKV represents a trie key-value pair
+type trieKV struct {
+ key common.Hash
+ value []byte
}
-// SlimToFull converts data on the 'slim RLP' format into the full RLP-format
-func SlimToFull(data []byte) ([]byte, error) {
- acc := &conversionAccount{}
- if err := rlp.DecodeBytes(data, acc); err != nil {
- return nil, err
- }
- if len(acc.Root) == 0 {
- acc.Root = emptyRoot[:]
- }
- if len(acc.CodeHash) == 0 {
- acc.CodeHash = emptyCode[:]
+type (
+ // trieGeneratorFn is the interface of trie generation, which can
+ // be implemented by different trie algorithms.
+ trieGeneratorFn func(in chan (trieKV), out chan (common.Hash))
+
+ // leafCallbackFn is the callback invoked at the leaves of the trie,
+ // returning the subtrie root for the specified subtrie identifier.
+ leafCallbackFn func(hash common.Hash, stat *generateStats) common.Hash
+)
+
+// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
+func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
+ return generateTrieRoot(it, common.Hash{}, stdGenerate, nil, &generateStats{start: time.Now()}, true)
+}
+
+// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
+func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
+ return generateTrieRoot(it, account, stdGenerate, nil, &generateStats{start: time.Now()}, true)
+}
+
+// VerifyState takes the whole snapshot tree as the input, traverses all the accounts
+// as well as the corresponding storages, and compares the re-computed hashes with the
+// original ones (the state root and the storage roots).
+func VerifyState(snaptree *Tree, root common.Hash) error {
+ acctIt, err := snaptree.AccountIterator(root, common.Hash{})
+ if err != nil {
+ return err
}
- fullData, err := rlp.EncodeToBytes(acc)
+ defer acctIt.Release()
+
+ got, err := generateTrieRoot(acctIt, common.Hash{}, stdGenerate, func(account common.Hash, stat *generateStats) common.Hash {
+ storageIt, err := snaptree.StorageIterator(root, account, common.Hash{})
+ if err != nil {
+ return common.Hash{}
+ }
+ defer storageIt.Release()
+
+ hash, err := generateTrieRoot(storageIt, account, stdGenerate, nil, stat, false)
+ if err != nil {
+ return common.Hash{}
+ }
+ return hash
+ }, &generateStats{start: time.Now()}, true)
+
if err != nil {
- return nil, err
+ return err
+ }
+ if got != root {
+ return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
}
- return fullData, nil
+ return nil
}
-// trieKV represents a trie key-value pair
-type trieKV struct {
- key common.Hash
- value []byte
+// generateStats is a collection of statistics gathered by the trie generator
+// for logging purposes.
+type generateStats struct {
+ accounts uint64
+ slots uint64
+ curAccount common.Hash
+ curSlot common.Hash
+ start time.Time
+ lock sync.RWMutex
+}
+
+// progress records the progress trie generator made recently.
+func (stat *generateStats) progress(accounts, slots uint64, curAccount common.Hash, curSlot common.Hash) {
+ stat.lock.Lock()
+ defer stat.lock.Unlock()
+
+ stat.accounts += accounts
+ stat.slots += slots
+ stat.curAccount = curAccount
+ stat.curSlot = curSlot
}
-type trieGeneratorFn func(in chan (trieKV), out chan (common.Hash))
+// report prints the cumulative progress statistic smartly.
+func (stat *generateStats) report() {
+ stat.lock.RLock()
+ defer stat.lock.RUnlock()
-// GenerateTrieRoot takes an account iterator and reproduces the root hash.
-func GenerateTrieRoot(it AccountIterator) common.Hash {
- return generateTrieRoot(it, stdGenerate)
+ var ctx []interface{}
+ if stat.curSlot != (common.Hash{}) {
+ ctx = append(ctx, []interface{}{
+ "in", stat.curAccount,
+ "at", stat.curSlot,
+ }...)
+ } else {
+ ctx = append(ctx, []interface{}{"at", stat.curAccount}...)
+ }
+ // Add the usual measurements
+ ctx = append(ctx, []interface{}{"accounts", stat.accounts}...)
+ if stat.slots != 0 {
+ ctx = append(ctx, []interface{}{"slots", stat.slots}...)
+ }
+ ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...)
+ log.Info("Generating trie hash from snapshot", ctx...)
+}
+
+// reportDone prints the last log when the whole generation is finished.
+func (stat *generateStats) reportDone() {
+ stat.lock.RLock()
+ defer stat.lock.RUnlock()
+
+ var ctx []interface{}
+ ctx = append(ctx, []interface{}{"accounts", stat.accounts}...)
+ if stat.slots != 0 {
+ ctx = append(ctx, []interface{}{"slots", stat.slots}...)
+ }
+ ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...)
+ log.Info("Generated trie hash from snapshot", ctx...)
}
-func generateTrieRoot(it AccountIterator, generatorFn trieGeneratorFn) common.Hash {
+// generateTrieRoot generates the trie hash based on the snapshot iterator.
+// It can be used for generating the account trie, a storage trie, or even the
+// whole state, which connects the accounts and their corresponding storages.
+func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
var (
- in = make(chan trieKV) // chan to pass leaves
- out = make(chan common.Hash) // chan to collect result
- wg sync.WaitGroup
+ in = make(chan trieKV) // chan to pass leaves
+ out = make(chan common.Hash, 1) // chan to collect result
+ stoplog = make(chan bool, 1) // 1-size buffer, works when logging is not enabled
+ wg sync.WaitGroup
)
+ // Spin up a go-routine for trie hash re-generation
wg.Add(1)
go func() {
+ defer wg.Done()
generatorFn(in, out)
- wg.Done()
}()
- // Feed leaves
- start := time.Now()
- logged := time.Now()
- accounts := 0
+
+ // Spin up a go-routine for progress logging
+ if report && stats != nil {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ timer := time.NewTimer(0)
+ defer timer.Stop()
+
+ for {
+ select {
+ case <-timer.C:
+ stats.report()
+ timer.Reset(time.Second * 8)
+ case success := <-stoplog:
+ if success {
+ stats.reportDone()
+ }
+ return
+ }
+ }
+ }()
+ }
+ // stop is a helper function to shut down the background threads
+ // and return the re-generated trie hash.
+ stop := func(success bool) common.Hash {
+ close(in)
+ result := <-out
+ stoplog <- success
+ wg.Wait()
+ return result
+ }
+ var (
+ logged = time.Now()
+ processed = uint64(0)
+ leaf trieKV
+ last common.Hash
+ )
+ // Start to feed leaves
for it.Next() {
- slimData := it.Account()
- fullData, _ := SlimToFull(slimData)
- l := trieKV{it.Hash(), fullData}
- in <- l
- if time.Since(logged) > 8*time.Second {
- log.Info("Generating trie hash from snapshot",
- "at", l.key, "accounts", accounts, "elapsed", time.Since(start))
- logged = time.Now()
+ if account == (common.Hash{}) {
+ var (
+ err error
+ fullData []byte
+ )
+ if leafCallback == nil {
+ fullData, err = FullAccountRLP(it.(AccountIterator).Account())
+ if err != nil {
+ stop(false)
+ return common.Hash{}, err
+ }
+ } else {
+ account, err := FullAccount(it.(AccountIterator).Account())
+ if err != nil {
+ stop(false)
+ return common.Hash{}, err
+ }
+ // Apply the leaf callback. Normally the callback is used to traverse
+ // the storage trie and re-generate the subtrie root.
+ subroot := leafCallback(it.Hash(), stats)
+ if !bytes.Equal(account.Root, subroot.Bytes()) {
+ stop(false)
+ return common.Hash{}, fmt.Errorf("invalid subroot(%x), want %x, got %x", it.Hash(), account.Root, subroot)
+ }
+ fullData, err = rlp.EncodeToBytes(account)
+ if err != nil {
+ stop(false)
+ return common.Hash{}, err
+ }
+ }
+ leaf = trieKV{it.Hash(), fullData}
+ } else {
+ leaf = trieKV{it.Hash(), common.CopyBytes(it.(StorageIterator).Slot())}
+ }
+ in <- leaf
+
+ // Accumulate the generation statistic if it's required.
+ processed++
+ if time.Since(logged) > 3*time.Second && stats != nil {
+ if account == (common.Hash{}) {
+ stats.progress(processed, 0, it.Hash(), common.Hash{})
+ } else {
+ stats.progress(0, processed, account, it.Hash())
+ }
+ logged, processed = time.Now(), 0
+ }
+ last = it.Hash()
+ }
+ // Commit the statistics of the last batch.
+ if processed > 0 && stats != nil {
+ if account == (common.Hash{}) {
+ stats.progress(processed, 0, last, common.Hash{})
+ } else {
+ stats.progress(0, processed, account, last)
}
- accounts++
}
- close(in)
- result := <-out
- log.Info("Generated trie hash from snapshot", "accounts", accounts, "elapsed", time.Since(start))
- wg.Wait()
- return result
+ result := stop(true)
+ return result, nil
}
// stdGenerate is a very basic hexary trie builder which uses the same Trie
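For callers, a hedged usage sketch (not part of the patch); snaps and root are assumed to come from an existing, fully generated snapshot tree, e.g. chain.Snapshots() and the current state root:

package snapshotcheck // hypothetical helper package, sketch only

import (
	"fmt"

	"github.com/celo-org/celo-blockchain/common"
	"github.com/celo-org/celo-blockchain/core/state/snapshot"
)

// checkSnapshotIntegrity re-derives the state from the snapshot and compares it
// against the trusted state root.
func checkSnapshotIntegrity(snaps *snapshot.Tree, root common.Hash) error {
	// Re-hash every account (and, via the internal leaf callback, every storage
	// trie) from the snapshot iterators and compare against the state root.
	if err := snapshot.VerifyState(snaps, root); err != nil {
		return fmt.Errorf("snapshot does not match state root %x: %v", root, err)
	}
	// The account trie root alone can also be reproduced from an account iterator;
	// here the storage roots embedded in the accounts are trusted as-is.
	it, err := snaps.AccountIterator(root, common.Hash{})
	if err != nil {
		return err
	}
	defer it.Release()

	recomputed, err := snapshot.GenerateAccountTrieRoot(it)
	if err != nil {
		return err
	}
	if recomputed != root {
		return fmt.Errorf("account trie root mismatch: have %x, want %x", recomputed, root)
	}
	return nil
}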
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index eb2b5e2936..aed58f17a2 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -44,7 +44,7 @@ var (
// aggregatorItemLimit is an approximate number of items that will end up
// in the agregator layer before it's flushed out to disk. A plain account
// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
- // 0B (+hash). Slots are mostly set/unset in lockstep, so thet average at
+ // 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
// smaller number to be on the safe side.
aggregatorItemLimit = aggregatorMemoryLimit / 42
@@ -105,11 +105,18 @@ type diffLayer struct {
root common.Hash // Root hash to which this snapshot diff belongs to
stale uint32 // Signals that the layer became stale (state progressed)
+ // destructSet is a very special helper marker. If an account is marked as
+ // deleted, then it's recorded in this set. However it's allowed that an account
+ // is included here but still available in other sets (e.g. storageData). The
+ // reason is that the diff layer includes all the changes in a *block*. It can
+ // happen that in tx_1 account A is self-destructed while in tx_2
+ // it's recreated. But we still need this marker to indicate that the "old" A is
+ // deleted; all data in the other sets belongs to the "new" A.
accountList []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
- accountData map[common.Hash][]byte // Keyed accounts for direct retrival (nil means deleted)
+ accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted)
storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
- storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrival. one per account (nil means deleted)
+ storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer
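To make the destructSet semantics above concrete, a hedged sketch (not part of the patch) of the maps a caller would hand to the snapshot tree when account A self-destructs in tx_1 and is recreated in tx_2 of the same block. Everything here is hypothetical: the helper name and all parameters are placeholders, the imports of common and snapshot are assumed, and the Tree.Update signature is an assumption about the snapshot package rather than something shown in this diff:

// applyBlockWithRecreatedAccount illustrates how a destructed-then-recreated
// account appears in the same diff layer; all parameters are placeholders.
func applyBlockWithRecreatedAccount(snaps *snapshot.Tree, blockRoot, parentRoot, addrHash, newSlotHash common.Hash, newSlimAccountRLP, newSlotValue []byte) error {
	// The "old" A is wiped via the destruct marker...
	destructs := map[common.Hash]struct{}{
		addrHash: {},
	}
	// ...while the "new" A, recreated later in the same block, still appears in the
	// account and storage sets of the very same diff layer.
	accounts := map[common.Hash][]byte{
		addrHash: newSlimAccountRLP,
	}
	storage := map[common.Hash]map[common.Hash][]byte{
		addrHash: {newSlotHash: newSlotValue},
	}
	// Assumed signature: Update(blockRoot, parentRoot, destructs, accounts, storage).
	return snaps.Update(blockRoot, parentRoot, destructs, accounts, storage)
}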
@@ -169,6 +176,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
destructSet: destructs,
accountData: accounts,
storageData: storage,
+ storageList: make(map[common.Hash][]common.Hash),
}
switch parent := parent.(type) {
case *diskLayer:
@@ -183,30 +191,21 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
if blob == nil {
panic(fmt.Sprintf("account %#x nil", accountHash))
}
+ // Determine memory size and track the dirty writes
+ dl.memory += uint64(common.HashLength + len(blob))
+ snapshotDirtyAccountWriteMeter.Mark(int64(len(blob)))
}
for accountHash, slots := range storage {
if slots == nil {
panic(fmt.Sprintf("storage %#x nil", accountHash))
}
- }
- // Determine memory size and track the dirty writes
- for _, data := range accounts {
- dl.memory += uint64(common.HashLength + len(data))
- snapshotDirtyAccountWriteMeter.Mark(int64(len(data)))
- }
- // Fill the storage hashes and sort them for the iterator
- dl.storageList = make(map[common.Hash][]common.Hash)
- for accountHash := range destructs {
- dl.storageList[accountHash] = nil
- }
- // Determine memory size and track the dirty writes
- for _, slots := range storage {
+ // Determine memory size and track the dirty writes
for _, data := range slots {
dl.memory += uint64(common.HashLength + len(data))
snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
}
}
- dl.memory += uint64(len(dl.storageList) * common.HashLength)
+ dl.memory += uint64(len(destructs) * common.HashLength)
return dl
}
@@ -287,6 +286,8 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
+//
+// Note the returned account is not a copy, please don't modify it.
func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
// Check the bloom filter first whether there's even a point in reaching into
// all the maps in all the layers below
@@ -295,13 +296,17 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
if !hit {
hit = dl.diffed.Contains(destructBloomHasher(hash))
}
+ var origin *diskLayer
+ if !hit {
+ origin = dl.origin // extract origin while holding the lock
+ }
dl.lock.RUnlock()
// If the bloom filter misses, don't even bother with traversing the memory
// diff layers, reach straight into the bottom persistent disk layer
- if !hit {
+ if origin != nil {
snapshotBloomAccountMissMeter.Mark(1)
- return dl.origin.AccountRLP(hash)
+ return origin.AccountRLP(hash)
}
// The bloom filter hit, start poking in the internal maps
return dl.accountRLP(hash, 0)
@@ -347,6 +352,8 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account. If the slot is unknown to this diff, it's parent
// is consulted.
+//
+// Note the returned slot is not a copy, please don't modify it.
func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
// Check the bloom filter first whether there's even a point in reaching into
// all the maps in all the layers below
@@ -355,13 +362,17 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
if !hit {
hit = dl.diffed.Contains(destructBloomHasher(accountHash))
}
+ var origin *diskLayer
+ if !hit {
+ origin = dl.origin // extract origin while holding the lock
+ }
dl.lock.RUnlock()
// If the bloom filter misses, don't even bother with traversing the memory
// diff layers, reach straight into the bottom persistent disk layer
- if !hit {
+ if origin != nil {
snapshotBloomStorageMissMeter.Mark(1)
- return dl.origin.Storage(accountHash, storageHash)
+ return origin.Storage(accountHash, storageHash)
}
// The bloom filter hit, start poking in the internal maps
return dl.storage(accountHash, storageHash, 0)
@@ -475,7 +486,7 @@ func (dl *diffLayer) flatten() snapshot {
}
}
-// AccountList returns a sorted list of all accounts in this difflayer, including
+// AccountList returns a sorted list of all accounts in this diffLayer, including
// the deleted ones.
//
// Note, the returned slice is not a copy, so do not modify it.
@@ -502,22 +513,34 @@ func (dl *diffLayer) AccountList() []common.Hash {
}
}
sort.Sort(hashes(dl.accountList))
+ dl.memory += uint64(len(dl.accountList) * common.HashLength)
return dl.accountList
}
-// StorageList returns a sorted list of all storage slot hashes in this difflayer
-// for the given account.
+// StorageList returns a sorted list of all storage slot hashes in this diffLayer
+// for the given account. If the whole storage is destructed in this layer, then
+// an additional flag *destructed = true* is returned, otherwise the flag is
+// false. The returned list also includes the hashes of deleted storage slots.
+// Note the special case where an account is deleted in a prior tx but recreated
+// in a following tx with some storage slots set: in that case the returned list
+// is not empty but the flag is true.
//
// Note, the returned slice is not a copy, so do not modify it.
-func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash {
- // If an old list already exists, return it
+func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) {
dl.lock.RLock()
- list := dl.storageList[accountHash]
+ _, destructed := dl.destructSet[accountHash]
+ if _, ok := dl.storageData[accountHash]; !ok {
+ // Account not tracked by this layer
+ dl.lock.RUnlock()
+ return nil, destructed
+ }
+ // If an old list already exists, return it
+ if list, exist := dl.storageList[accountHash]; exist {
+ dl.lock.RUnlock()
+ return list, destructed // the cached list can't be nil
+ }
dl.lock.RUnlock()
- if list != nil {
- return list
- }
// No old sorted account list exists, generate a new one
dl.lock.Lock()
defer dl.lock.Unlock()
@@ -529,5 +552,6 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash {
}
sort.Sort(hashes(storageList))
dl.storageList[accountHash] = storageList
- return storageList
+ dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength)
+ return storageList, destructed
}
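A hedged usage sketch of the new two-value StorageList (again assuming package snapshot; the helper name is hypothetical): the destructed flag tells the caller whether deeper layers can be skipped, while the returned list may still be non-empty for an account recreated in the same block.

func exampleStorageList(dl *diffLayer, account common.Hash) {
	hashes, destructed := dl.StorageList(account)
	if destructed {
		// The whole storage was wiped in this layer; anything found in
		// deeper layers belongs to the "old" account and can be ignored.
	}
	for _, hash := range hashes {
		_ = hash // sorted slot hashes tracked by this layer, deleted ones included
	}
}
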
diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go
index ff1c0eaa6c..cc48c84747 100644
--- a/core/state/snapshot/difflayer_test.go
+++ b/core/state/snapshot/difflayer_test.go
@@ -109,7 +109,8 @@ func TestMergeBasics(t *testing.T) {
if have, want := len(merged.storageList), i; have != want {
t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
}
- if have, want := len(merged.StorageList(aHash)), len(sMap); have != want {
+ list, _ := merged.StorageList(aHash)
+ if have, want := len(list), len(sMap); have != want {
t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
}
if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
@@ -313,7 +314,7 @@ func BenchmarkSearchSlot(b *testing.B) {
// With accountList and sorting
// BenchmarkFlatten-6 50 29890856 ns/op
//
-// Without sorting and tracking accountlist
+// Without sorting and tracking accountList
// BenchmarkFlatten-6 300 5511511 ns/op
func BenchmarkFlatten(b *testing.B) {
fill := func(parent snapshot) *diffLayer {
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index d6195d6e6f..833cabee1c 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -31,7 +31,7 @@ import (
// diskLayer is a low level persistent snapshot built on top of a key-value store.
type diskLayer struct {
diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
- triedb *trie.Database // Trie node cache for reconstuction purposes
+ triedb *trie.Database // Trie node cache for reconstruction purposes
cache *fastcache.Cache // Cache to avoid hitting the disk for direct access
root common.Hash // Root hash of the base snapshot
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index 79d015b8a5..364bdcae4a 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -28,6 +28,7 @@ import (
"github.com/celo-org/celo-blockchain/ethdb"
"github.com/celo-org/celo-blockchain/ethdb/leveldb"
"github.com/celo-org/celo-blockchain/ethdb/memorydb"
+ "github.com/celo-org/celo-blockchain/rlp"
)
// reverse reverses the contents of a byte slice. It's used to update random accs
@@ -429,6 +430,81 @@ func TestDiskPartialMerge(t *testing.T) {
}
}
+// Tests that when the bottom-most diff layer is merged into the disk
+// layer, the corresponding generator is persisted correctly.
+func TestDiskGeneratorPersistence(t *testing.T) {
+ var (
+ accOne = randomHash()
+ accTwo = randomHash()
+ accOneSlotOne = randomHash()
+ accOneSlotTwo = randomHash()
+
+ accThree = randomHash()
+ accThreeSlot = randomHash()
+ baseRoot = randomHash()
+ diffRoot = randomHash()
+ diffTwoRoot = randomHash()
+ genMarker = append(randomHash().Bytes(), randomHash().Bytes()...)
+ )
+ // Testing scenario 1, the disk layer is still under construction.
+ db := rawdb.NewMemoryDatabase()
+
+ rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
+ rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
+ rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
+ rawdb.WriteSnapshotRoot(db, baseRoot)
+
+ // Create a disk layer based on all above updates
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ baseRoot: &diskLayer{
+ diskdb: db,
+ cache: fastcache.New(500 * 1024),
+ root: baseRoot,
+ genMarker: genMarker,
+ },
+ },
+ }
+ // Modify or delete some accounts, flatten everything onto disk
+ if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
+ accTwo: accTwo[:],
+ }, nil); err != nil {
+ t.Fatalf("failed to update snapshot tree: %v", err)
+ }
+ if err := snaps.Cap(diffRoot, 0); err != nil {
+ t.Fatalf("failed to flatten snapshot tree: %v", err)
+ }
+ blob := rawdb.ReadSnapshotGenerator(db)
+ var generator journalGenerator
+ if err := rlp.DecodeBytes(blob, &generator); err != nil {
+ t.Fatalf("Failed to decode snapshot generator %v", err)
+ }
+ if !bytes.Equal(generator.Marker, genMarker) {
+ t.Fatalf("Generator marker is not matched")
+ }
+ // Test scenario 2, the disk layer is fully generated
+ // Modify or delete some accounts, flatten everything onto disk
+ if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
+ accThree: accThree.Bytes(),
+ }, map[common.Hash]map[common.Hash][]byte{
+ accThree: {accThreeSlot: accThreeSlot.Bytes()},
+ }); err != nil {
+ t.Fatalf("failed to update snapshot tree: %v", err)
+ }
+ diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
+ diskLayer.genMarker = nil // Construction finished
+ if err := snaps.Cap(diffTwoRoot, 0); err != nil {
+ t.Fatalf("failed to flatten snapshot tree: %v", err)
+ }
+ blob = rawdb.ReadSnapshotGenerator(db)
+ if err := rlp.DecodeBytes(blob, &generator); err != nil {
+ t.Fatalf("Failed to decode snapshot generator %v", err)
+ }
+ if len(generator.Marker) != 0 {
+ t.Fatalf("Failed to update snapshot generator")
+ }
+}
+
// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 6cf75c5521..1f0ed4ec19 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -19,6 +19,7 @@ package snapshot
import (
"bytes"
"encoding/binary"
+ "fmt"
"math/big"
"time"
@@ -42,7 +43,7 @@ var (
)
// generatorStats is a collection of statistics gathered by the snapshot generator
-// for logging purposes.
+// for logging purposes.
type generatorStats struct {
wiping chan struct{} // Notification channel if wiping is in progress
origin uint64 // Origin prefix where generation started
@@ -54,9 +55,11 @@ type generatorStats struct {
// Log creates a contextual log with the given message and the context pulled
// from the internally maintained statistics.
-func (gs *generatorStats) Log(msg string, marker []byte) {
+func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) {
var ctx []interface{}
-
+ if root != (common.Hash{}) {
+ ctx = append(ctx, []interface{}{"root", root}...)
+ }
// Figure out whether we're after or within an account
switch len(marker) {
case common.HashLength:
@@ -98,36 +101,79 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i
wiper = wipeSnapshot(diskdb, true)
}
// Create a new disk layer with an initialized state marker at zero
- rawdb.WriteSnapshotRoot(diskdb, root)
-
+ var (
+ stats = &generatorStats{wiping: wiper, start: time.Now()}
+ batch = diskdb.NewBatch()
+ genMarker = []byte{} // Initialized but empty!
+ )
+ rawdb.WriteSnapshotRoot(batch, root)
+ journalProgress(batch, genMarker, stats)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write initialized state marker", "error", err)
+ }
base := &diskLayer{
diskdb: diskdb,
triedb: triedb,
root: root,
cache: fastcache.New(cache * 1024 * 1024),
- genMarker: []byte{}, // Initialized but empty!
+ genMarker: genMarker,
genPending: make(chan struct{}),
genAbort: make(chan chan *generatorStats),
}
- go base.generate(&generatorStats{wiping: wiper, start: time.Now()})
+ go base.generate(stats)
+ log.Debug("Start snapshot generation", "root", root)
return base
}
+// journalProgress persists the generator stats into the database to resume later.
+func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
+ // Write out the generator marker. Note this is a standalone disk layer
+ // generator entry which is not mixed with the journal; it's fine if the
+ // generator is persisted while the journal is not.
+ entry := journalGenerator{
+ Done: marker == nil,
+ Marker: marker,
+ }
+ if stats != nil {
+ entry.Wiping = (stats.wiping != nil)
+ entry.Accounts = stats.accounts
+ entry.Slots = stats.slots
+ entry.Storage = uint64(stats.storage)
+ }
+ blob, err := rlp.EncodeToBytes(entry)
+ if err != nil {
+ panic(err) // Cannot happen, here to catch dev errors
+ }
+ var logstr string
+ switch {
+ case marker == nil:
+ logstr = "done"
+ case bytes.Equal(marker, []byte{}):
+ logstr = "empty"
+ case len(marker) == common.HashLength:
+ logstr = fmt.Sprintf("%#x", marker)
+ default:
+ logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
+ }
+ log.Debug("Journalled generator progress", "progress", logstr)
+ rawdb.WriteSnapshotGenerator(db, blob)
+}
+
// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
-// gethering and logging, since the method surfs the blocks as they arrive, often
+// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
// If a database wipe is in operation, wait until it's done
if stats.wiping != nil {
- stats.Log("Wiper running, state snapshotting paused", dl.genMarker)
+ stats.Log("Wiper running, state snapshotting paused", common.Hash{}, dl.genMarker)
select {
// If wiper is done, resume normal mode of operation
case <-stats.wiping:
stats.wiping = nil
stats.start = time.Now()
- // If generator was aboted during wipe, return
+ // If generator was aborted during wipe, return
case abort := <-dl.genAbort:
abort <- stats
return
@@ -137,13 +183,13 @@ func (dl *diskLayer) generate(stats *generatorStats) {
accTrie, err := trie.NewSecure(dl.root, dl.triedb)
if err != nil {
// The account trie is missing (GC), surf the chain until one becomes available
- stats.Log("Trie missing, state snapshotting paused", dl.genMarker)
+ stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
abort := <-dl.genAbort
abort <- stats
return
}
- stats.Log("Resuming state snapshot generation", dl.genMarker)
+ stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)
var accMarker []byte
if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
@@ -167,7 +213,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
- data := AccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+ data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
// If the account is not yet in-progress, write it out
if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
@@ -184,15 +230,19 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
// Only write and set the marker if we actually did something useful
if batch.ValueSize() > 0 {
+ // Ensure the generator entry is in sync with the data
+ marker := accountHash[:]
+ journalProgress(batch, marker, stats)
+
batch.Write()
batch.Reset()
dl.lock.Lock()
- dl.genMarker = accountHash[:]
+ dl.genMarker = marker
dl.lock.Unlock()
}
if abort != nil {
- stats.Log("Aborting state snapshot generation", accountHash[:])
+ stats.Log("Aborting state snapshot generation", dl.root, accountHash[:])
abort <- stats
return
}
@@ -201,7 +251,10 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if acc.Root != emptyRoot {
storeTrie, err := trie.NewSecure(acc.Root, dl.triedb)
if err != nil {
- log.Crit("Storage trie inaccessible for snapshot generation", "err", err)
+ log.Error("Generator failed to access storage trie", "accroot", dl.root, "acchash", common.BytesToHash(accIt.Key), "stroot", acc.Root, "err", err)
+ abort := <-dl.genAbort
+ abort <- stats
+ return
}
var storeMarker []byte
if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
@@ -222,32 +275,54 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
// Only write and set the marker if we actually did something useful
if batch.ValueSize() > 0 {
+ // Ensure the generator entry is in sync with the data
+ marker := append(accountHash[:], storeIt.Key...)
+ journalProgress(batch, marker, stats)
+
batch.Write()
batch.Reset()
dl.lock.Lock()
- dl.genMarker = append(accountHash[:], storeIt.Key...)
+ dl.genMarker = marker
dl.lock.Unlock()
}
if abort != nil {
- stats.Log("Aborting state snapshot generation", append(accountHash[:], storeIt.Key...))
+ stats.Log("Aborting state snapshot generation", dl.root, append(accountHash[:], storeIt.Key...))
abort <- stats
return
}
+ if time.Since(logged) > 8*time.Second {
+ stats.Log("Generating state snapshot", dl.root, append(accountHash[:], storeIt.Key...))
+ logged = time.Now()
+ }
}
}
+ if err := storeIt.Err; err != nil {
+ log.Error("Generator failed to iterate storage trie", "accroot", dl.root, "acchash", common.BytesToHash(accIt.Key), "stroot", acc.Root, "err", err)
+ abort := <-dl.genAbort
+ abort <- stats
+ return
+ }
}
if time.Since(logged) > 8*time.Second {
- stats.Log("Generating state snapshot", accIt.Key)
+ stats.Log("Generating state snapshot", dl.root, accIt.Key)
logged = time.Now()
}
// Some account processed, unmark the marker
accMarker = nil
}
- // Snapshot fully generated, set the marker to nil
- if batch.ValueSize() > 0 {
- batch.Write()
+ if err := accIt.Err; err != nil {
+ log.Error("Generator failed to iterate account trie", "root", dl.root, "err", err)
+ abort := <-dl.genAbort
+ abort <- stats
+ return
}
+ // Snapshot fully generated, set the marker to nil.
+ // Note even if there is nothing to commit, persist the
+ // generator anyway to mark the snapshot as complete.
+ journalProgress(batch, nil, stats)
+ batch.Write()
+
log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
"storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start)))
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
new file mode 100644
index 0000000000..648325f007
--- /dev/null
+++ b/core/state/snapshot/generate_test.go
@@ -0,0 +1,190 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snapshot
+
+import (
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/celo-org/celo-blockchain/common"
+ "github.com/celo-org/celo-blockchain/ethdb/memorydb"
+ "github.com/celo-org/celo-blockchain/rlp"
+ "github.com/celo-org/celo-blockchain/trie"
+)
+
+// Tests that snapshot generation errors out correctly in case of a missing trie
+// node in the account trie.
+func TestGenerateCorruptAccountTrie(t *testing.T) {
+ // We can't use statedb to make a test trie (circular dependency), so make
+ // a fake one manually. We're going with a small account trie of 3 accounts,
+ // without any storage slots to keep the test smaller.
+ var (
+ diskdb = memorydb.New()
+ triedb = trie.NewDatabase(diskdb)
+ )
+ tr, _ := trie.NewSecure(common.Hash{}, triedb)
+ acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ tr.Update([]byte("acc-1"), val) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
+
+ acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ = rlp.EncodeToBytes(acc)
+ tr.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+
+ acc = &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ = rlp.EncodeToBytes(acc)
+ tr.Update([]byte("acc-3"), val) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
+ tr.Commit(nil) // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
+
+ // Delete an account trie leaf and ensure the generator chokes
+ triedb.Commit(common.HexToHash("0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978"), false)
+ diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
+
+ snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978"), nil)
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+ t.Errorf("Snapshot generated against corrupt account trie")
+
+ case <-time.After(250 * time.Millisecond):
+ // Not generated fast enough, hopefully blocked inside on missing trie node fail
+ }
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests that snapshot generation errors out correctly in case of a missing root
+// trie node for a storage trie. It's similar to internal corruption but it is
+// handled differently inside the generator.
+func TestGenerateMissingStorageTrie(t *testing.T) {
+ // We can't use statedb to make a test trie (circular dependency), so make
+ // a fake one manually. We're going with a small account trie of 3 accounts,
+ // two of which also have the same 3-slot storage trie attached.
+ var (
+ diskdb = memorydb.New()
+ triedb = trie.NewDatabase(diskdb)
+ )
+ stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+ stTrie.Update([]byte("key-1"), []byte("val-1")) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
+ stTrie.Update([]byte("key-2"), []byte("val-2")) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
+ stTrie.Update([]byte("key-3"), []byte("val-3")) // 0x51c71a47af0695957647fb68766d0becee77e953df17c29b3c2f25436f055c78
+ stTrie.Commit(nil) // Root: 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+
+ accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+ acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+
+ acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ = rlp.EncodeToBytes(acc)
+ accTrie.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+
+ acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ = rlp.EncodeToBytes(acc)
+ accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+ accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+
+ // We can only corrupt the disk database, so flush the tries out
+ triedb.Reference(
+ common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67"),
+ common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
+ )
+ triedb.Reference(
+ common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67"),
+ common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
+ )
+ triedb.Commit(common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"), false)
+
+ // Delete a storage trie root and ensure the generator chokes
+ diskdb.Delete(common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67").Bytes())
+
+ snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"), nil)
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+ t.Errorf("Snapshot generated against corrupt storage trie")
+
+ case <-time.After(250 * time.Millisecond):
+ // Not generated fast enough, hopefully blocked inside on missing trie node fail
+ }
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
+
+// Tests that snapshot generation errors out correctly in case of a missing trie
+// node in a storage trie.
+func TestGenerateCorruptStorageTrie(t *testing.T) {
+ // We can't use statedb to make a test trie (circular dependency), so make
+ // a fake one manually. We're going with a small account trie of 3 accounts,
+ // two of which also have the same 3-slot storage trie attached.
+ var (
+ diskdb = memorydb.New()
+ triedb = trie.NewDatabase(diskdb)
+ )
+ stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+ stTrie.Update([]byte("key-1"), []byte("val-1")) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
+ stTrie.Update([]byte("key-2"), []byte("val-2")) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
+ stTrie.Update([]byte("key-3"), []byte("val-3")) // 0x51c71a47af0695957647fb68766d0becee77e953df17c29b3c2f25436f055c78
+ stTrie.Commit(nil) // Root: 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+
+ accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+ acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ := rlp.EncodeToBytes(acc)
+ accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+
+ acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ = rlp.EncodeToBytes(acc)
+ accTrie.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+
+ acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+ val, _ = rlp.EncodeToBytes(acc)
+ accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+ accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+
+ // We can only corrupt the disk database, so flush the tries out
+ triedb.Reference(
+ common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67"),
+ common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
+ )
+ triedb.Reference(
+ common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67"),
+ common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
+ )
+ triedb.Commit(common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"), false)
+
+ // Delete a storage trie leaf and ensure the generator chokes
+ diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
+
+ snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"), nil)
+ select {
+ case <-snap.genPending:
+ // Snapshot generation succeeded
+ t.Errorf("Snapshot generated against corrupt storage trie")
+
+ case <-time.After(250 * time.Millisecond):
+ // Not generated fast enough, hopefully blocked inside on missing trie node fail
+ }
+ // Signal abortion to the generator and wait for it to tear down
+ stop := make(chan *generatorStats)
+ snap.genAbort <- stop
+ <-stop
+}
diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go
index 77fd3763e1..b30acfd4bd 100644
--- a/core/state/snapshot/iterator.go
+++ b/core/state/snapshot/iterator.go
@@ -26,9 +26,9 @@ import (
"github.com/celo-org/celo-blockchain/ethdb"
)
-// AccountIterator is an iterator to step over all the accounts in a snapshot,
-// which may or may npt be composed of multiple layers.
-type AccountIterator interface {
+// Iterator is an iterator to step over all the accounts or the specific
+// storage in a snapshot, which may or may not be composed of multiple layers.
+type Iterator interface {
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected).
@@ -38,18 +38,35 @@ type AccountIterator interface {
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
Error() error
- // Hash returns the hash of the account the iterator is currently at.
+ // Hash returns the hash of the account or storage slot the iterator is
+ // currently at.
Hash() common.Hash
- // Account returns the RLP encoded slim account the iterator is currently at.
- // An error will be returned if the iterator becomes invalid (e.g. snaph
- Account() []byte
-
// Release releases associated resources. Release should always succeed and
// can be called multiple times without causing error.
Release()
}
+// AccountIterator is an iterator to step over all the accounts in a snapshot,
+// which may or may not be composed of multiple layers.
+type AccountIterator interface {
+ Iterator
+
+ // Account returns the RLP encoded slim account the iterator is currently at.
+ // An error will be returned if the iterator becomes invalid
+ Account() []byte
+}
+
+// StorageIterator is an iterator to step over the specific storage in a snapshot,
+// which may or may not be composed of multiple layers.
+type StorageIterator interface {
+ Iterator
+
+ // Slot returns the storage slot the iterator is currently at. An error will
+ // be returned if the iterator becomes invalid
+ Slot() []byte
+}
+
// diffAccountIterator is an account iterator that steps over the accounts (both
// live and deleted) contained within a single diff layer. Higher order iterators
// will use the deleted accounts to skip deeper iterators.
@@ -116,10 +133,12 @@ func (it *diffAccountIterator) Hash() common.Hash {
// Account returns the RLP encoded slim account the iterator is currently at.
// This method may _fail_, if the underlying layer has been flattened between
-// the call to Next and Acccount. That type of error will set it.Err.
+// the call to Next and Account. That type of error will set it.Err.
// This method assumes that flattening does not delete elements from
// the accountdata mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
+//
+// Note the returned account is not a copy, please don't modify it.
func (it *diffAccountIterator) Account() []byte {
it.layer.lock.RLock()
blob, ok := it.layer.accountData[it.curHash]
@@ -164,7 +183,7 @@ func (it *diskAccountIterator) Next() bool {
}
// Try to advance the iterator and release it if we reached the end
for {
- if !it.it.Next() || !bytes.HasPrefix(it.it.Key(), rawdb.SnapshotAccountPrefix) {
+ if !it.it.Next() {
it.it.Release()
it.it = nil
return false
@@ -182,12 +201,15 @@ func (it *diskAccountIterator) Next() bool {
// A diff layer is immutable after creation content wise and can always be fully
// iterated without error, so this method always returns nil.
func (it *diskAccountIterator) Error() error {
+ if it.it == nil {
+ return nil // Iterator is exhausted and released
+ }
return it.it.Error()
}
// Hash returns the hash of the account the iterator is currently at.
func (it *diskAccountIterator) Hash() common.Hash {
- return common.BytesToHash(it.it.Key())
+ return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}
// Account returns the RLP encoded slim account the iterator is currently at.
@@ -203,3 +225,176 @@ func (it *diskAccountIterator) Release() {
it.it = nil
}
}
+
+// diffStorageIterator is a storage iterator that steps over the specific storage
+// (both live and deleted) contained within a single diff layer. Higher order
+// iterators will use the deleted slot to skip deeper iterators.
+type diffStorageIterator struct {
+ // curHash is the current hash the iterator is positioned on. The field is
+ // explicitly tracked since the referenced diff layer might go stale after
+ // the iterator was positioned and we don't want to fail accessing the old
+ // hash as long as the iterator is not touched any more.
+ curHash common.Hash
+ account common.Hash
+
+ layer *diffLayer // Live layer to retrieve values from
+ keys []common.Hash // Keys left in the layer to iterate
+ fail error // Any failures encountered (stale)
+}
+
+// StorageIterator creates a storage iterator over a single diff layer.
+// Besides the storage iterator, an additional flag "destructed" is also
+// returned. If it's true, the whole storage is destructed in this layer
+// (and possibly recreated too), so there is no need to bother deeper
+// layers for storage retrieval.
+func (dl *diffLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) {
+ // Create the storage iterator for this account even if it's marked
+ // as destructed. The iterator is for the new account which just has
+ // the same address as the deleted one.
+ hashes, destructed := dl.StorageList(account)
+ index := sort.Search(len(hashes), func(i int) bool {
+ return bytes.Compare(seek[:], hashes[i][:]) <= 0
+ })
+ // Assemble and return the already seeked iterator
+ return &diffStorageIterator{
+ layer: dl,
+ account: account,
+ keys: hashes[index:],
+ }, destructed
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diffStorageIterator) Next() bool {
+ // If the iterator was already stale, consider it a programmer error. Although
+ // we could just return false here, triggering this path would probably mean
+ // somebody forgot to check for Error, so let's blow up instead of undefined
+ // behavior that's hard to debug.
+ if it.fail != nil {
+ panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
+ }
+ // Stop iterating if all keys were exhausted
+ if len(it.keys) == 0 {
+ return false
+ }
+ if it.layer.Stale() {
+ it.fail, it.keys = ErrSnapshotStale, nil
+ return false
+ }
+ // Iterator seems to be still alive, retrieve and cache the live hash
+ it.curHash = it.keys[0]
+ // key cached, shift the iterator and notify the user of success
+ it.keys = it.keys[1:]
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (it *diffStorageIterator) Error() error {
+ return it.fail
+}
+
+// Hash returns the hash of the storage slot the iterator is currently at.
+func (it *diffStorageIterator) Hash() common.Hash {
+ return it.curHash
+}
+
+// Slot returns the raw storage slot value the iterator is currently at.
+// This method may _fail_, if the underlying layer has been flattened between
+// the call to Next and Value. That type of error will set it.Err.
+// This method assumes that flattening does not delete elements from
+// the storage mapping (writing nil into it is fine though), and will panic
+// if elements have been deleted.
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (it *diffStorageIterator) Slot() []byte {
+ it.layer.lock.RLock()
+ storage, ok := it.layer.storageData[it.account]
+ if !ok {
+ panic(fmt.Sprintf("iterator referenced non-existent account storage: %x", it.account))
+ }
+ // Storage slot might be nil (deleted), but it must exist
+ blob, ok := storage[it.curHash]
+ if !ok {
+ panic(fmt.Sprintf("iterator referenced non-existent storage slot: %x", it.curHash))
+ }
+ it.layer.lock.RUnlock()
+ if it.layer.Stale() {
+ it.fail, it.keys = ErrSnapshotStale, nil
+ }
+ return blob
+}
+
+// Release is a noop for diff storage iterators as there are no held resources.
+func (it *diffStorageIterator) Release() {}
+
+// diskStorageIterator is a storage iterator that steps over the live storage
+// contained within a disk layer.
+type diskStorageIterator struct {
+ layer *diskLayer
+ account common.Hash
+ it ethdb.Iterator
+}
+
+// StorageIterator creates a storage iterator over a disk layer.
+// If the whole storage is destructed, then all entries in the disk
+// layer are deleted already. So the "destructed" flag returned here
+// is always false.
+func (dl *diskLayer) StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool) {
+ pos := common.TrimRightZeroes(seek[:])
+ return &diskStorageIterator{
+ layer: dl,
+ account: account,
+ it: dl.diskdb.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
+ }, false
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diskStorageIterator) Next() bool {
+ // If the iterator was already exhausted, don't bother
+ if it.it == nil {
+ return false
+ }
+ // Try to advance the iterator and release it if we reached the end
+ for {
+ if !it.it.Next() {
+ it.it.Release()
+ it.it = nil
+ return false
+ }
+ if len(it.it.Key()) == len(rawdb.SnapshotStoragePrefix)+common.HashLength+common.HashLength {
+ break
+ }
+ }
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+//
+// The error is sourced from the underlying database iterator; once that
+// iterator is exhausted and released, this method always returns nil.
+func (it *diskStorageIterator) Error() error {
+ if it.it == nil {
+ return nil // Iterator is exhausted and released
+ }
+ return it.it.Error()
+}
+
+// Hash returns the hash of the storage slot the iterator is currently at.
+func (it *diskStorageIterator) Hash() common.Hash {
+ return common.BytesToHash(it.it.Key()) // The prefix will be truncated
+}
+
+// Slot returns the raw storage slot content the iterator is currently at.
+func (it *diskStorageIterator) Slot() []byte {
+ return it.it.Value()
+}
+
+// Release releases the database snapshot held during iteration.
+func (it *diskStorageIterator) Release() {
+ // The iterator is auto-released on exhaustion, so make sure it's still alive
+ if it.it != nil {
+ it.it.Release()
+ it.it = nil
+ }
+}
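A short, hedged sketch (assuming package snapshot; the helper name is hypothetical) of driving the new storage iterator over a single layer; the boolean is the destructed flag discussed above, and Slot() may be nil for slots deleted in a diff layer.

func exampleWalkStorage(dl *diffLayer, account common.Hash) error {
	it, destructed := dl.StorageIterator(account, common.Hash{})
	defer it.Release()

	_ = destructed // true if this layer wiped the account's whole storage
	for it.Next() {
		hash, blob := it.Hash(), it.Slot()
		_, _ = hash, blob // blob is nil for slots deleted in this layer
	}
	return it.Error()
}
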
diff --git a/core/state/snapshot/iterator_binary.go b/core/state/snapshot/iterator_binary.go
index b04a0f5b15..4af1dccd58 100644
--- a/core/state/snapshot/iterator_binary.go
+++ b/core/state/snapshot/iterator_binary.go
@@ -22,29 +22,91 @@ import (
"github.com/celo-org/celo-blockchain/common"
)
-// binaryAccountIterator is a simplistic iterator to step over the accounts in
-// a snapshot, which may or may npt be composed of multiple layers. Performance
+// binaryIterator is a simplistic iterator to step over the accounts or storage
+// in a snapshot, which may or may not be composed of multiple layers. Performance
// wise this iterator is slow, it's meant for cross validating the fast one,
-type binaryAccountIterator struct {
- a *diffAccountIterator
- b AccountIterator
- aDone bool
- bDone bool
- k common.Hash
- fail error
+type binaryIterator struct {
+ a Iterator
+ b Iterator
+ aDone bool
+ bDone bool
+ accountIterator bool
+ k common.Hash
+ account common.Hash
+ fail error
}
-// newBinaryAccountIterator creates a simplistic account iterator to step over
-// all the accounts in a slow, but eaily verifiable way.
-func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
+// initBinaryAccountIterator creates a simplistic iterator to step over all the
+// accounts in a slow, but easily verifiable way. Note this function is used for
+// initialization; use `newBinaryAccountIterator` as the API.
+func (dl *diffLayer) initBinaryAccountIterator() Iterator {
parent, ok := dl.parent.(*diffLayer)
if !ok {
- // parent is the disk layer
- return dl.AccountIterator(common.Hash{})
+ l := &binaryIterator{
+ a: dl.AccountIterator(common.Hash{}),
+ b: dl.Parent().AccountIterator(common.Hash{}),
+ accountIterator: true,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
}
- l := &binaryAccountIterator{
- a: dl.AccountIterator(common.Hash{}).(*diffAccountIterator),
- b: parent.newBinaryAccountIterator(),
+ l := &binaryIterator{
+ a: dl.AccountIterator(common.Hash{}),
+ b: parent.initBinaryAccountIterator(),
+ accountIterator: true,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+}
+
+// initBinaryStorageIterator creates a simplistic iterator to step over all the
+// storage slots in a slow, but easily verifiable way. Note this function is used
+// for initialization; use `newBinaryStorageIterator` as the API.
+func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) Iterator {
+ parent, ok := dl.parent.(*diffLayer)
+ if !ok {
+ // If the storage in this layer is already destructed, discard all
+ // deeper layers but still return a valid single-branch iterator.
+ a, destructed := dl.StorageIterator(account, common.Hash{})
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+ // The parent is the disk layer, so there is no need to care about the
+ // "destructed" flag anymore.
+ b, _ := dl.Parent().StorageIterator(account, common.Hash{})
+ l := &binaryIterator{
+ a: a,
+ b: b,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+ }
+ // If the storage in this layer is already destructed, discard all
+ // deeper layers but still return a valid single-branch iterator.
+ a, destructed := dl.StorageIterator(account, common.Hash{})
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+ l := &binaryIterator{
+ a: a,
+ b: parent.initBinaryStorageIterator(account),
+ account: account,
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
@@ -54,23 +116,22 @@ func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected).
-func (it *binaryAccountIterator) Next() bool {
+func (it *binaryIterator) Next() bool {
if it.aDone && it.bDone {
return false
}
- nextB := it.b.Hash()
first:
- nextA := it.a.Hash()
if it.aDone {
+ it.k = it.b.Hash()
it.bDone = !it.b.Next()
- it.k = nextB
return true
}
if it.bDone {
+ it.k = it.a.Hash()
it.aDone = !it.a.Next()
- it.k = nextA
return true
}
+ nextA, nextB := it.a.Hash(), it.b.Hash()
if diff := bytes.Compare(nextA[:], nextB[:]); diff < 0 {
it.aDone = !it.a.Next()
it.k = nextA
@@ -87,20 +148,43 @@ first:
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
-func (it *binaryAccountIterator) Error() error {
+func (it *binaryIterator) Error() error {
return it.fail
}
// Hash returns the hash of the account the iterator is currently at.
-func (it *binaryAccountIterator) Hash() common.Hash {
+func (it *binaryIterator) Hash() common.Hash {
return it.k
}
// Account returns the RLP encoded slim account the iterator is currently at, or
// nil if the iterated snapshot stack became stale (you can check Error after
// to see if it failed or not).
-func (it *binaryAccountIterator) Account() []byte {
- blob, err := it.a.layer.AccountRLP(it.k)
+//
+// Note the returned account is not a copy, please don't modify it.
+func (it *binaryIterator) Account() []byte {
+ if !it.accountIterator {
+ return nil
+ }
+ // The topmost iterator must be `diffAccountIterator`
+ blob, err := it.a.(*diffAccountIterator).layer.AccountRLP(it.k)
+ if err != nil {
+ it.fail = err
+ return nil
+ }
+ return blob
+}
+
+// Slot returns the raw storage slot data the iterator is currently at, or
+// nil if the iterated snapshot stack became stale (you can check Error after
+// to see if it failed or not).
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (it *binaryIterator) Slot() []byte {
+ if it.accountIterator {
+ return nil
+ }
+ blob, err := it.a.(*diffStorageIterator).layer.Storage(it.account, it.k)
if err != nil {
it.fail = err
return nil
@@ -109,7 +193,21 @@ func (it *binaryAccountIterator) Account() []byte {
}
// Release recursively releases all the iterators in the stack.
-func (it *binaryAccountIterator) Release() {
+func (it *binaryIterator) Release() {
it.a.Release()
it.b.Release()
}
+
+// newBinaryAccountIterator creates a simplistic account iterator to step over
+// all the accounts in a slow, but easily verifiable way.
+func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
+ iter := dl.initBinaryAccountIterator()
+ return iter.(AccountIterator)
+}
+
+// newBinaryStorageIterator creates a simplistic storage iterator to step over
+// all the storage slots in a slow, but easily verifiable way.
+func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator {
+ iter := dl.initBinaryStorageIterator(account)
+ return iter.(StorageIterator)
+}
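The binary iterator only exists to cross-validate the fast one; a hypothetical sketch (package snapshot assumed, helper name invented) of walking it for a single account. Note that, unlike the fast iterator, it also yields deleted slots, for which Slot() returns nil.

func exampleBinaryWalk(dl *diffLayer, account common.Hash) error {
	it := dl.newBinaryStorageIterator(account)
	defer it.Release()

	for it.Next() {
		if blob := it.Slot(); blob != nil {
			_ = blob // live slot value, resolved through the layer stack
		}
	}
	return it.Error()
}
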
diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go
index 3d5475b4a2..ad64109972 100644
--- a/core/state/snapshot/iterator_fast.go
+++ b/core/state/snapshot/iterator_fast.go
@@ -24,23 +24,23 @@ import (
"github.com/celo-org/celo-blockchain/common"
)
-// weightedAccountIterator is an account iterator with an assigned weight. It is
-// used to prioritise which account is the correct one if multiple iterators find
-// the same one (modified in multiple consecutive blocks).
-type weightedAccountIterator struct {
- it AccountIterator
+// weightedIterator is an iterator with an assigned weight. It is used to prioritise
+// which account or storage slot is the correct one if multiple iterators find the
+// same one (modified in multiple consecutive blocks).
+type weightedIterator struct {
+ it Iterator
priority int
}
-// weightedAccountIterators is a set of iterators implementing the sort.Interface.
-type weightedAccountIterators []*weightedAccountIterator
+// weightedIterators is a set of iterators implementing the sort.Interface.
+type weightedIterators []*weightedIterator
// Len implements sort.Interface, returning the number of active iterators.
-func (its weightedAccountIterators) Len() int { return len(its) }
+func (its weightedIterators) Len() int { return len(its) }
// Less implements sort.Interface, returning which of two iterators in the stack
// is before the other.
-func (its weightedAccountIterators) Less(i, j int) bool {
+func (its weightedIterators) Less(i, j int) bool {
// Order the iterators primarily by the account hashes
hashI := its[i].it.Hash()
hashJ := its[j].it.Hash()
@@ -51,45 +51,64 @@ func (its weightedAccountIterators) Less(i, j int) bool {
case 1:
return false
}
- // Same account in multiple layers, split by priority
+ // Same account/storage-slot in multiple layers, split by priority
return its[i].priority < its[j].priority
}
// Swap implements sort.Interface, swapping two entries in the iterator stack.
-func (its weightedAccountIterators) Swap(i, j int) {
+func (its weightedIterators) Swap(i, j int) {
its[i], its[j] = its[j], its[i]
}
-// fastAccountIterator is a more optimized multi-layer iterator which maintains a
+// fastIterator is a more optimized multi-layer iterator which maintains a
// direct mapping of all iterators leading down to the bottom layer.
-type fastAccountIterator struct {
- tree *Tree // Snapshot tree to reinitialize stale sub-iterators with
- root common.Hash // Root hash to reinitialize stale sub-iterators through
+type fastIterator struct {
+ tree *Tree // Snapshot tree to reinitialize stale sub-iterators with
+ root common.Hash // Root hash to reinitialize stale sub-iterators through
+
curAccount []byte
+ curSlot []byte
- iterators weightedAccountIterators
+ iterators weightedIterators
initiated bool
+ account bool
fail error
}
-// newFastAccountIterator creates a new hierarhical account iterator with one
+// newFastIterator creates a new hierarchical account or storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
-func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) {
+func newFastIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) {
snap := tree.Snapshot(root)
if snap == nil {
return nil, fmt.Errorf("unknown snapshot: %x", root)
}
- fi := &fastAccountIterator{
- tree: tree,
- root: root,
+ fi := &fastIterator{
+ tree: tree,
+ root: root,
+ account: accountIterator,
}
current := snap.(snapshot)
for depth := 0; current != nil; depth++ {
- fi.iterators = append(fi.iterators, &weightedAccountIterator{
- it: current.AccountIterator(seek),
- priority: depth,
- })
+ if accountIterator {
+ fi.iterators = append(fi.iterators, &weightedIterator{
+ it: current.AccountIterator(seek),
+ priority: depth,
+ })
+ } else {
+ // If the whole storage is destructed in this layer, don't
+ // bother deeper layers anymore. But we should still keep the
+ // iterator for this layer, since it can contain some valid
+ // slots which belong to the re-created account.
+ it, destructed := current.StorageIterator(account, seek)
+ fi.iterators = append(fi.iterators, &weightedIterator{
+ it: it,
+ priority: depth,
+ })
+ if destructed {
+ break
+ }
+ }
current = current.Parent()
}
fi.init()
@@ -98,7 +117,7 @@ func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (Acc
// init walks over all the iterators and resolves any clashes between them, after
// which it prepares the stack for step-by-step iteration.
-func (fi *fastAccountIterator) init() {
+func (fi *fastIterator) init() {
// Track which account hashes are iterators positioned on
var positioned = make(map[common.Hash]int)
@@ -153,7 +172,7 @@ func (fi *fastAccountIterator) init() {
}
// Next steps the iterator forward one element, returning false if exhausted.
-func (fi *fastAccountIterator) Next() bool {
+func (fi *fastIterator) Next() bool {
if len(fi.iterators) == 0 {
return false
}
@@ -161,21 +180,25 @@ func (fi *fastAccountIterator) Next() bool {
// Don't forward first time -- we had to 'Next' once in order to
// do the sorting already
fi.initiated = true
- fi.curAccount = fi.iterators[0].it.Account()
+ if fi.account {
+ fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
+ } else {
+ fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
+ }
if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
fi.fail = innerErr
return false
}
- if fi.curAccount != nil {
+ if fi.curAccount != nil || fi.curSlot != nil {
return true
}
- // Implicit else: we've hit a nil-account, and need to fall through to the
- // loop below to land on something non-nil
+ // Implicit else: we've hit a nil-account or nil-slot, and need to
+ // fall through to the loop below to land on something non-nil
}
- // If an account is deleted in one of the layers, the key will still be there,
- // but the actual value will be nil. However, the iterator should not
- // export nil-values (but instead simply omit the key), so we need to loop
- // here until we either
+ // If an account or a slot is deleted in one of the layers, the key will
+ // still be there, but the actual value will be nil. However, the iterator
+ // should not export nil-values (but instead simply omit the key), so we
+ // need to loop here until we either
// - get a non-nil value,
// - hit an error,
// - or exhaust the iterator
@@ -183,12 +206,16 @@ func (fi *fastAccountIterator) Next() bool {
if !fi.next(0) {
return false // exhausted
}
- fi.curAccount = fi.iterators[0].it.Account()
+ if fi.account {
+ fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
+ } else {
+ fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
+ }
if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
fi.fail = innerErr
return false // error
}
- if fi.curAccount != nil {
+ if fi.curAccount != nil || fi.curSlot != nil {
break // non-nil value found
}
}
@@ -201,7 +228,7 @@ func (fi *fastAccountIterator) Next() bool {
// For example, if the iterated hashes become [2,3,5,5,8,9,10], then we should
// invoke next(3), which will call Next on elem 3 (the second '5') and will
// cascade along the list, applying the same operation if needed.
-func (fi *fastAccountIterator) next(idx int) bool {
+func (fi *fastIterator) next(idx int) bool {
// If this particular iterator got exhausted, remove it and return true (the
// next one is surely not exhausted yet, otherwise it would have been removed
// already).
@@ -262,7 +289,7 @@ func (fi *fastAccountIterator) next(idx int) bool {
}
// move advances an iterator to another position in the list.
-func (fi *fastAccountIterator) move(index, newpos int) {
+func (fi *fastIterator) move(index, newpos int) {
elem := fi.iterators[index]
copy(fi.iterators[index:], fi.iterators[index+1:newpos+1])
fi.iterators[newpos] = elem
@@ -270,23 +297,30 @@ func (fi *fastAccountIterator) move(index, newpos int) {
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
-func (fi *fastAccountIterator) Error() error {
+func (fi *fastIterator) Error() error {
return fi.fail
}
// Hash returns the current key
-func (fi *fastAccountIterator) Hash() common.Hash {
+func (fi *fastIterator) Hash() common.Hash {
return fi.iterators[0].it.Hash()
}
-// Account returns the current key
-func (fi *fastAccountIterator) Account() []byte {
+// Account returns the current account blob.
+// Note the returned account is not a copy, please don't modify it.
+func (fi *fastIterator) Account() []byte {
return fi.curAccount
}
+// Slot returns the current storage slot.
+// Note the returned slot is not a copy, please don't modify it.
+func (fi *fastIterator) Slot() []byte {
+ return fi.curSlot
+}
+
// Release iterates over all the remaining live layer iterators and releases each
// of them individually.
-func (fi *fastAccountIterator) Release() {
+func (fi *fastIterator) Release() {
for _, it := range fi.iterators {
it.it.Release()
}
@@ -294,9 +328,23 @@ func (fi *fastAccountIterator) Release() {
}
// Debug is a convenience helper during testing
-func (fi *fastAccountIterator) Debug() {
+func (fi *fastIterator) Debug() {
for _, it := range fi.iterators {
fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0])
}
fmt.Println()
}
+
+// newFastAccountIterator creates a new hierarchical account iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously.
+func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) {
+ return newFastIterator(tree, root, common.Hash{}, seek, true)
+}
+
+// newFastStorageIterator creates a new hierarchical storage iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously.
+func newFastStorageIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ return newFastIterator(tree, root, account, seek, false)
+}
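And a matching sketch for the fast account iterator, which walks the whole snapshot diff stack at once (package snapshot assumed, helper name invented); nil accounts, i.e. deletions, are already filtered out by the fast iterator.

func exampleWalkAccounts(tree *Tree, root common.Hash) error {
	it, err := newFastAccountIterator(tree, root, common.Hash{})
	if err != nil {
		return err
	}
	defer it.Release()

	for it.Next() {
		_, _ = it.Hash(), it.Account() // Account() is the slim-RLP encoding
	}
	return it.Error()
}
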
diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go
index b81fc19995..09c2276b17 100644
--- a/core/state/snapshot/iterator_test.go
+++ b/core/state/snapshot/iterator_test.go
@@ -28,7 +28,7 @@ import (
"github.com/celo-org/celo-blockchain/core/rawdb"
)
-// TestAccountIteratorBasics tests some simple single-layer iteration
+// TestAccountIteratorBasics tests some simple single-layer (diff and disk) iteration
func TestAccountIteratorBasics(t *testing.T) {
var (
destructs = make(map[common.Hash]struct{})
@@ -53,9 +53,55 @@ func TestAccountIteratorBasics(t *testing.T) {
}
}
// Add some (identical) layers on top
- parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
- it := parent.AccountIterator(common.Hash{})
- verifyIterator(t, 100, it)
+ diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+ it := diffLayer.AccountIterator(common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+
+ diskLayer := diffToDisk(diffLayer)
+ it = diskLayer.AccountIterator(common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+}
+
+// TestStorageIteratorBasics tests some simple single-layer (diff and disk) iteration for storage
+func TestStorageIteratorBasics(t *testing.T) {
+ var (
+ nilStorage = make(map[common.Hash]int)
+ accounts = make(map[common.Hash][]byte)
+ storage = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ // Fill some random data
+ for i := 0; i < 10; i++ {
+ h := randomHash()
+ accounts[h] = randomAccount()
+
+ accStorage := make(map[common.Hash][]byte)
+ value := make([]byte, 32)
+
+ var nilstorage int
+ for i := 0; i < 100; i++ {
+ rand.Read(value)
+ if rand.Intn(2) == 0 {
+ accStorage[randomHash()] = common.CopyBytes(value)
+ } else {
+ accStorage[randomHash()] = nil // delete slot
+ nilstorage += 1
+ }
+ }
+ storage[h] = accStorage
+ nilStorage[h] = nilstorage
+ }
+ // Add some (identical) layers on top
+ diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
+ for account := range accounts {
+ it, _ := diffLayer.StorageIterator(account, common.Hash{})
+ verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+ }
+
+ diskLayer := diffToDisk(diffLayer)
+ for account := range accounts {
+ it, _ := diskLayer.StorageIterator(account, common.Hash{})
+ verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator
+ }
}
type testIterator struct {
@@ -87,6 +133,10 @@ func (ti *testIterator) Account() []byte {
return nil
}
+func (ti *testIterator) Slot() []byte {
+ return nil
+}
+
func (ti *testIterator) Release() {}
func TestFastIteratorBasics(t *testing.T) {
@@ -102,13 +152,12 @@ func TestFastIteratorBasics(t *testing.T) {
{9, 10}, {10, 13, 15, 16}},
expKeys: []byte{0, 1, 2, 7, 8, 9, 10, 13, 14, 15, 16}},
} {
- var iterators []*weightedAccountIterator
+ var iterators []*weightedIterator
for i, data := range tc.lists {
it := newTestIterator(data...)
- iterators = append(iterators, &weightedAccountIterator{it, i})
-
+ iterators = append(iterators, &weightedIterator{it, i})
}
- fi := &fastAccountIterator{
+ fi := &fastIterator{
iterators: iterators,
initiated: false,
}
@@ -122,7 +171,15 @@ func TestFastIteratorBasics(t *testing.T) {
}
}
-func verifyIterator(t *testing.T, expCount int, it AccountIterator) {
+type verifyContent int
+
+const (
+ verifyNothing verifyContent = iota
+ verifyAccount
+ verifyStorage
+)
+
+func verifyIterator(t *testing.T, expCount int, it Iterator, verify verifyContent) {
t.Helper()
var (
@@ -134,10 +191,13 @@ func verifyIterator(t *testing.T, expCount int, it AccountIterator) {
if bytes.Compare(last[:], hash[:]) >= 0 {
t.Errorf("wrong order: %x >= %x", last, hash)
}
- if it.Account() == nil {
+ count++
+ if verify == verifyAccount && len(it.(AccountIterator).Account()) == 0 {
+ t.Errorf("iterator returned nil-value for hash %x", hash)
+ } else if verify == verifyStorage && len(it.(StorageIterator).Slot()) == 0 {
t.Errorf("iterator returned nil-value for hash %x", hash)
}
- count++
+ last = hash
}
if count != expCount {
t.Errorf("iterator count mismatch: have %d, want %d", count, expCount)
@@ -173,13 +233,74 @@ func TestAccountIteratorTraversal(t *testing.T) {
// Verify the single and multi-layer iterators
head := snaps.Snapshot(common.HexToHash("0x04"))
- verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}))
- verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator())
+ verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
+ verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
- defer it.Release()
+ verifyIterator(t, 7, it, verifyAccount)
+ it.Release()
+
+ // Test that the functionality still works after persisting the
+ // bottom-most layers into the disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x04"), 2)
+ verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+ verifyIterator(t, 7, it, verifyAccount)
+ it.Release()
+}
+
+func TestStorageIteratorTraversal(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
+
+ // Verify the single and multi-layer iterators
+ head := snaps.Snapshot(common.HexToHash("0x04"))
+
+ diffIter, _ := head.(snapshot).StorageIterator(common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 3, diffIter, verifyNothing)
+ verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+
+ it, _ := snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 6, it, verifyStorage)
+ it.Release()
- verifyIterator(t, 7, it)
+ // Test that the functionality still works after persisting the
+ // bottom-most layers into the disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x04"), 2)
+ verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 6, it, verifyStorage)
+ it.Release()
}
// TestAccountIteratorTraversalValues tests some multi-layer iteration, where we
@@ -242,8 +363,6 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)
it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
- defer it.Release()
-
head := snaps.Snapshot(common.HexToHash("0x09"))
for it.Next() {
hash := it.Hash()
@@ -255,6 +374,128 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
}
}
+ it.Release()
+
+ // Test that the functionality still works after persisting the
+ // bottom-most layers into the disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x09"), 2)
+
+ it, _ = snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.AccountRLP(hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected account: %v", err)
+ }
+ if have := it.Account(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
+}
+
+func TestStorageIteratorTraversalValues(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ wrapStorage := func(storage map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
+ return map[common.Hash]map[common.Hash][]byte{
+ common.HexToHash("0xaa"): storage,
+ }
+ }
+ // Create a batch of storage sets to seed subsequent layers with
+ var (
+ a = make(map[common.Hash][]byte)
+ b = make(map[common.Hash][]byte)
+ c = make(map[common.Hash][]byte)
+ d = make(map[common.Hash][]byte)
+ e = make(map[common.Hash][]byte)
+ f = make(map[common.Hash][]byte)
+ g = make(map[common.Hash][]byte)
+ h = make(map[common.Hash][]byte)
+ )
+ for i := byte(2); i < 0xff; i++ {
+ a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i))
+ if i > 20 && i%2 == 0 {
+ b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i))
+ }
+ if i%4 == 0 {
+ c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i))
+ }
+ if i%7 == 0 {
+ d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i))
+ }
+ if i%8 == 0 {
+ e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i))
+ }
+ if i > 50 || i < 85 {
+ f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i))
+ }
+ if i%64 == 0 {
+ g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i))
+ }
+ if i%128 == 0 {
+ h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i))
+ }
+ }
+ // Assemble a stack of snapshots from the account layers
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
+ snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
+ snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
+ snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
+ snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
+
+ it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
+ head := snaps.Snapshot(common.HexToHash("0x09"))
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.Storage(common.HexToHash("0xaa"), hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected storage slot: %v", err)
+ }
+ if have := it.Slot(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
+
+ // Test that the functionality still works after persisting the
+ // bottom-most layers into the disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x09"), 2)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
+ for it.Next() {
+ hash := it.Hash()
+ want, err := head.Storage(common.HexToHash("0xaa"), hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve expected slot: %v", err)
+ }
+ if have := it.Slot(); !bytes.Equal(want, have) {
+ t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
+ }
+ }
+ it.Release()
}
// This testcase is notorious, all layers contain the exact same 200 accounts.
@@ -285,13 +526,27 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
}
// Iterate the entire stack and ensure everything is hit only once
head := snaps.Snapshot(common.HexToHash("0x80"))
- verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}))
- verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator())
+ verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{}), verifyNothing)
+ verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
it, _ := snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
- defer it.Release()
+ verifyIterator(t, 200, it, verifyAccount)
+ it.Release()
+
+ // Test that the functionality still works after persisting the
+ // bottom-most layers into the disk.
+ limit := aggregatorMemoryLimit
+ defer func() {
+ aggregatorMemoryLimit = limit
+ }()
+ aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk
+ snaps.Cap(common.HexToHash("0x80"), 2)
+
+ verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
- verifyIterator(t, 200, it)
+ it, _ = snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{})
+ verifyIterator(t, 200, it, verifyAccount)
+ it.Release()
}
// TestAccountIteratorFlattening tests what happens when we
@@ -358,46 +613,105 @@ func TestAccountIteratorSeek(t *testing.T) {
// Construct various iterators and ensure their traversal is correct
it, _ := snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xdd"))
defer it.Release()
- verifyIterator(t, 3, it) // expected: ee, f0, ff
+ verifyIterator(t, 3, it, verifyAccount) // expected: ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"))
defer it.Release()
- verifyIterator(t, 4, it) // expected: aa, ee, f0, ff
+ verifyIterator(t, 4, it, verifyAccount) // expected: aa, ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff"))
defer it.Release()
- verifyIterator(t, 1, it) // expected: ff
+ verifyIterator(t, 1, it, verifyAccount) // expected: ff
it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff1"))
defer it.Release()
- verifyIterator(t, 0, it) // expected: nothing
+ verifyIterator(t, 0, it, verifyAccount) // expected: nothing
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xbb"))
defer it.Release()
- verifyIterator(t, 6, it) // expected: bb, cc, dd, ee, f0, ff
+ verifyIterator(t, 6, it, verifyAccount) // expected: bb, cc, dd, ee, f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xef"))
defer it.Release()
- verifyIterator(t, 2, it) // expected: f0, ff
+ verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xf0"))
defer it.Release()
- verifyIterator(t, 2, it) // expected: f0, ff
+ verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff"))
defer it.Release()
- verifyIterator(t, 1, it) // expected: ff
+ verifyIterator(t, 1, it, verifyAccount) // expected: ff
it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff1"))
defer it.Release()
- verifyIterator(t, 0, it) // expected: nothing
+ verifyIterator(t, 0, it, verifyAccount) // expected: nothing
+}
+
+func TestStorageIteratorSeek(t *testing.T) {
+ // Create a snapshot stack with some initial data
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
+
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
+
+ // Storage slot set is now
+ // 02: 01, 03, 05
+ // 03: 01, 02, 03, 05 (, 05), 06
+ // 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08
+ // Construct various iterators and ensure their traversal is correct
+ it, _ := snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
+ defer it.Release()
+ verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x02"))
+ defer it.Release()
+ verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x5"))
+ defer it.Release()
+ verifyIterator(t, 1, it, verifyStorage) // expected: 05
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x6"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyStorage) // expected: nothing
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
+ defer it.Release()
+ verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x05"))
+ defer it.Release()
+ verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x08"))
+ defer it.Release()
+ verifyIterator(t, 1, it, verifyStorage) // expected: 08
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x09"))
+ defer it.Release()
+ verifyIterator(t, 0, it, verifyStorage) // expected: nothing
}
-// TestIteratorDeletions tests that the iterator behaves correct when there are
+// TestAccountIteratorDeletions tests that the iterator behaves correct when there are
// deleted accounts (where the Account() value is nil). The iterator
// should not output any accounts or nil-values for those cases.
-func TestIteratorDeletions(t *testing.T) {
+func TestAccountIteratorDeletions(t *testing.T) {
// Create an empty base layer and a snapshot tree out of it
base := &diskLayer{
diskdb: rawdb.NewMemoryDatabase(),
@@ -426,7 +740,7 @@ func TestIteratorDeletions(t *testing.T) {
// The output should be 11,33,44,55
it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
// Do a quick check
- verifyIterator(t, 4, it)
+ verifyIterator(t, 4, it, verifyAccount)
it.Release()
// And a more detailed verification that we indeed do not see '0x22'
@@ -443,6 +757,63 @@ func TestIteratorDeletions(t *testing.T) {
}
}
+func TestStorageIteratorDeletions(t *testing.T) {
+ // Create an empty base layer and a snapshot tree out of it
+ base := &diskLayer{
+ diskdb: rawdb.NewMemoryDatabase(),
+ root: common.HexToHash("0x01"),
+ cache: fastcache.New(1024 * 500),
+ }
+ snaps := &Tree{
+ layers: map[common.Hash]snapshot{
+ base.root: base,
+ },
+ }
+ // Stack three diff layers on top with various overlaps
+ snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
+
+ snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
+
+ // The output should be 02,04,05,06
+ it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 4, it, verifyStorage)
+ it.Release()
+
+ // The output should be 04,05,06
+ it, _ = snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.HexToHash("0x03"))
+ verifyIterator(t, 3, it, verifyStorage)
+ it.Release()
+
+ // Destruct the whole storage
+ destructed := map[common.Hash]struct{}{
+ common.HexToHash("0xaa"): {},
+ }
+ snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)
+
+ it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 0, it, verifyStorage)
+ it.Release()
+
+ // Re-insert the slots of the same account
+ snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil,
+ randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
+
+ // The output should be 07,08,09
+ it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 3, it, verifyStorage)
+ it.Release()
+
+ // Destruct the whole storage but re-create the account in the same layer
+ snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
+ it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
+ verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
+ it.Release()
+
+ verifyIterator(t, 2, snaps.Snapshot(common.HexToHash("0x06")).(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+}
+
// BenchmarkAccountIteratorTraversal is a bit a bit notorious -- all layers contain the
// exact same 200 accounts. That means that we need to process 2000 items, but
// only spit out 200 values eventually.
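For context on the test changes: verifyIterator now takes the unified Iterator interface plus a verifyContent mode, but the core invariant is unchanged -- hashes must come out in strictly increasing order and the element count must match. A minimal standalone sketch of that check, using stand-in types rather than the real snapshot interfaces:

package main

import (
	"bytes"
	"fmt"
)

// iterator is a stand-in for the unified snapshot Iterator interface.
type iterator interface {
	Next() bool
	Hash() [32]byte
}

// sliceIterator replays a fixed list of hashes.
type sliceIterator struct {
	hashes [][32]byte
	pos    int
}

func (it *sliceIterator) Next() bool {
	if it.pos >= len(it.hashes) {
		return false
	}
	it.pos++
	return true
}

func (it *sliceIterator) Hash() [32]byte { return it.hashes[it.pos-1] }

// verify mirrors the ordering/count check done by verifyIterator: every hash
// must be strictly larger than the previous one and the total must match.
func verify(it iterator, expCount int) error {
	var (
		count int
		last  [32]byte
	)
	for it.Next() {
		hash := it.Hash()
		if bytes.Compare(last[:], hash[:]) >= 0 {
			return fmt.Errorf("wrong order: %x >= %x", last, hash)
		}
		count++
		last = hash
	}
	if count != expCount {
		return fmt.Errorf("count mismatch: have %d, want %d", count, expCount)
	}
	return nil
}

func main() {
	it := &sliceIterator{hashes: [][32]byte{{0x01}, {0x02}, {0x03}}}
	fmt.Println(verify(it, 3)) // <nil>
}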
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index b72c3f1fa6..332fccd556 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -33,6 +33,8 @@ import (
"github.com/celo-org/celo-blockchain/trie"
)
+const journalVersion uint64 = 0
+
// journalGenerator is a disk layer entry containing the generator progress marker.
type journalGenerator struct {
Wiping bool // Whether the database was in progress of being wiped
@@ -61,8 +63,91 @@ type journalStorage struct {
Vals [][]byte
}
+// loadAndParseLegacyJournal tries to parse the snapshot journal in legacy format.
+func loadAndParseLegacyJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+ // Retrieve the journal, for legacy journal it must exist since even for
+ // 0 layer it stores whether we've already generated the snapshot or are
+ // in progress only.
+ journal := rawdb.ReadSnapshotJournal(db)
+ if len(journal) == 0 {
+ return nil, journalGenerator{}, errors.New("missing or corrupted snapshot journal")
+ }
+ r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+ // Read the snapshot generation progress for the disk layer
+ var generator journalGenerator
+ if err := r.Decode(&generator); err != nil {
+ return nil, journalGenerator{}, fmt.Errorf("failed to load snapshot progress marker: %v", err)
+ }
+ // Load all the snapshot diffs from the journal
+ snapshot, err := loadDiffLayer(base, r)
+ if err != nil {
+ return nil, generator, err
+ }
+ return snapshot, generator, nil
+}
+
+// loadAndParseJournal tries to parse the snapshot journal in latest format.
+func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
+ // Retrieve the disk layer generator. It must exist, whether or not the
+ // snapshot is fully generated; otherwise the entire disk layer is
+ // invalid.
+ generatorBlob := rawdb.ReadSnapshotGenerator(db)
+ if len(generatorBlob) == 0 {
+ return nil, journalGenerator{}, errors.New("missing snapshot generator")
+ }
+ var generator journalGenerator
+ if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
+ return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err)
+ }
+ // Retrieve the diff layer journal. It's possible that the journal does
+ // not exist, e.g. the disk layer was still generating when Geth crashed
+ // without persisting the diff journal.
+ // So if there is no journal, or the journal is invalid (e.g. it doesn't
+ // match the disk layer, or it's a legacy-format journal), we just discard
+ // all diffs and try to recover them later.
+ journal := rawdb.ReadSnapshotJournal(db)
+ if len(journal) == 0 {
+ log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "missing")
+ return base, generator, nil
+ }
+ r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+ // Firstly, resolve the first element as the journal version
+ version, err := r.Uint()
+ if err != nil {
+ log.Warn("Failed to resolve the journal version", "error", err)
+ return base, generator, nil
+ }
+ if version != journalVersion {
+ log.Warn("Discarded the snapshot journal with wrong version", "required", journalVersion, "got", version)
+ return base, generator, nil
+ }
+ // Secondly, resolve the disk layer root, ensuring it's continuous with
+ // the disk layer. At this point we know the journal has the correct
+ // version, so we expect everything to resolve properly.
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ return nil, journalGenerator{}, errors.New("missing disk layer root")
+ }
+ // The diff journal doesn't match the disk layer, discard it.
+ // This can happen when Geth crashes without persisting the latest
+ // diff journal.
+ if !bytes.Equal(root.Bytes(), base.root.Bytes()) {
+ log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "unmatched")
+ return base, generator, nil
+ }
+ // Load all the snapshot diffs from the journal
+ snapshot, err := loadDiffLayer(base, r)
+ if err != nil {
+ return nil, journalGenerator{}, err
+ }
+ log.Debug("Loaded snapshot journal", "diskroot", base.root, "diffhead", snapshot.Root())
+ return snapshot, generator, nil
+}
+
// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) {
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, recovery bool) (snapshot, error) {
// Retrieve the block number and hash of the snapshot, failing if no snapshot
// is present in the database (or crashed mid-update).
baseRoot := rawdb.ReadSnapshotRoot(diskdb)
@@ -75,28 +160,36 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
cache: fastcache.New(cache * 1024 * 1024),
root: baseRoot,
}
- // Retrieve the journal, it must exist since even for 0 layer it stores whether
- // we've already generated the snapshot or are in progress only
- journal := rawdb.ReadSnapshotJournal(diskdb)
- if len(journal) == 0 {
- return nil, errors.New("missing or corrupted snapshot journal")
- }
- r := rlp.NewStream(bytes.NewReader(journal), 0)
-
- // Read the snapshot generation progress for the disk layer
- var generator journalGenerator
- if err := r.Decode(&generator); err != nil {
- return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err)
+ var legacy bool
+ snapshot, generator, err := loadAndParseJournal(diskdb, base)
+ if err != nil {
+ log.Warn("Failed to load new-format journal", "error", err)
+ snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
+ legacy = true
}
- // Load all the snapshot diffs from the journal
- snapshot, err := loadDiffLayer(base, r)
if err != nil {
return nil, err
}
- // Entire snapshot journal loaded, sanity check the head and return
- // Journal doesn't exist, don't worry if it's not supposed to
+ // Entire snapshot journal loaded, sanity check the head. If the loaded
+ // snapshot doesn't match the current state root, print a warning log, or
+ // discard the entire snapshot if it's a legacy one.
+ //
+ // Possible scenario: Geth crashed without persisting the journal and then
+ // restarted; the head was rewound to the point with available state (trie),
+ // which is below the snapshot. In this case the snapshot can be recovered
+ // by re-executing blocks, but right now that's unavailable.
if head := snapshot.Root(); head != root {
- return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+ // If it's a legacy snapshot, or a new-format snapshot that's not in
+ // recovery mode, return the error here to force a rebuild of the
+ // entire snapshot.
+ if legacy || !recovery {
+ return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+ }
+ // We're in snapshot recovery, where the assumption is that the
+ // disk layer is always higher than the chain head. It can
+ // eventually be recovered once the chain head moves beyond the
+ // disk layer.
+ log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root)
}
// Everything loaded correctly, resume any suspended operations
if !generator.Done {
@@ -158,7 +251,11 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
}
accountData := make(map[common.Hash][]byte)
for _, entry := range accounts {
- accountData[entry.Hash] = entry.Blob
+ if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
+ accountData[entry.Hash] = entry.Blob
+ } else {
+ accountData[entry.Hash] = nil
+ }
}
var storage []journalStorage
if err := r.Decode(&storage); err != nil {
@@ -168,15 +265,19 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
for _, entry := range storage {
slots := make(map[common.Hash][]byte)
for i, key := range entry.Keys {
- slots[key] = entry.Vals[i]
+ if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
+ slots[key] = entry.Vals[i]
+ } else {
+ slots[key] = nil
+ }
}
storageData[entry.Hash] = slots
}
return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r)
}
-// Journal writes the persistent layer generator stats into a buffer to be stored
-// in the database as the snapshot journal.
+// Journal terminates any in-progress snapshot generation, also implicitly pushing
+// the progress into the database.
func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
// If the snapshot is currently being generated, abort it
var stats *generatorStats
@@ -185,7 +286,86 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
dl.genAbort <- abort
if stats = <-abort; stats != nil {
- stats.Log("Journalling in-progress snapshot", dl.genMarker)
+ stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
+ }
+ }
+ // Ensure the layer didn't get stale
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return common.Hash{}, ErrSnapshotStale
+ }
+ // Ensure the generator stats are written even if none were run this cycle
+ journalProgress(dl.diskdb, dl.genMarker, stats)
+
+ log.Debug("Journalled disk layer", "root", dl.root)
+ return dl.root, nil
+}
+
+// Journal writes the memory layer contents into a buffer to be stored in the
+// database as the snapshot journal.
+func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
+ // Journal the parent first
+ base, err := dl.parent.Journal(buffer)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Ensure the layer didn't get stale
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.Stale() {
+ return common.Hash{}, ErrSnapshotStale
+ }
+ // Everything below was journalled, persist this layer too
+ if err := rlp.Encode(buffer, dl.root); err != nil {
+ return common.Hash{}, err
+ }
+ destructs := make([]journalDestruct, 0, len(dl.destructSet))
+ for hash := range dl.destructSet {
+ destructs = append(destructs, journalDestruct{Hash: hash})
+ }
+ if err := rlp.Encode(buffer, destructs); err != nil {
+ return common.Hash{}, err
+ }
+ accounts := make([]journalAccount, 0, len(dl.accountData))
+ for hash, blob := range dl.accountData {
+ accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
+ }
+ if err := rlp.Encode(buffer, accounts); err != nil {
+ return common.Hash{}, err
+ }
+ storage := make([]journalStorage, 0, len(dl.storageData))
+ for hash, slots := range dl.storageData {
+ keys := make([]common.Hash, 0, len(slots))
+ vals := make([][]byte, 0, len(slots))
+ for key, val := range slots {
+ keys = append(keys, key)
+ vals = append(vals, val)
+ }
+ storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
+ }
+ if err := rlp.Encode(buffer, storage); err != nil {
+ return common.Hash{}, err
+ }
+ log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
+ return base, nil
+}
+
+// LegacyJournal writes the persistent layer generator stats into a buffer
+// to be stored in the database as the snapshot journal.
+//
+// Note it's the legacy version which is only used in testing right now.
+func (dl *diskLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
+ // If the snapshot is currently being generated, abort it
+ var stats *generatorStats
+ if dl.genAbort != nil {
+ abort := make(chan *generatorStats)
+ dl.genAbort <- abort
+
+ if stats = <-abort; stats != nil {
+ stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
}
}
// Ensure the layer didn't get stale
@@ -206,6 +386,7 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
entry.Slots = stats.slots
entry.Storage = uint64(stats.storage)
}
+ log.Debug("Legacy journalled disk layer", "root", dl.root)
if err := rlp.Encode(buffer, entry); err != nil {
return common.Hash{}, err
}
@@ -214,9 +395,11 @@ func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
// Journal writes the memory layer contents into a buffer to be stored in the
// database as the snapshot journal.
-func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
+//
+// Note it's the legacy version which is only used in testing right now.
+func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
// Journal the parent first
- base, err := dl.parent.Journal(buffer)
+ base, err := dl.parent.LegacyJournal(buffer)
if err != nil {
return common.Hash{}, err
}
@@ -258,5 +441,6 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
if err := rlp.Encode(buffer, storage); err != nil {
return common.Hash{}, err
}
+ log.Debug("Legacy journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
return base, nil
}
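To make the new journal layout concrete: the blob starts with the journal version and the disk layer root, followed by one (root, destructs, accounts, storage) record per diff layer, all RLP-encoded back to back. A rough sketch of writing such a blob, assuming the celo-blockchain rlp and common packages are available and omitting the destruct/storage records for brevity:

package main

import (
	"bytes"
	"fmt"

	"github.com/celo-org/celo-blockchain/common"
	"github.com/celo-org/celo-blockchain/rlp"
)

const journalVersion uint64 = 0

// journalAccount mirrors the per-account journal entry shape used above.
type journalAccount struct {
	Hash common.Hash
	Blob []byte
}

func main() {
	buf := new(bytes.Buffer)

	// Header: journal version + disk layer root, so the loader can reject
	// stale or discontinuous journals instead of crashing on them.
	if err := rlp.Encode(buf, journalVersion); err != nil {
		panic(err)
	}
	if err := rlp.Encode(buf, common.HexToHash("0x01")); err != nil {
		panic(err)
	}
	// One diff layer on top of the disk layer: its root plus its account data.
	// The real journal also carries destructs and storage slots the same way.
	if err := rlp.Encode(buf, common.HexToHash("0x02")); err != nil {
		panic(err)
	}
	accounts := []journalAccount{{Hash: common.HexToHash("0xaa"), Blob: []byte{0x01}}}
	if err := rlp.Encode(buf, accounts); err != nil {
		panic(err)
	}
	fmt.Printf("journal blob (%d bytes): %x\n", buf.Len(), buf.Bytes())
}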
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index be77282ae0..c46d8ed8ff 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -29,6 +29,7 @@ import (
"github.com/celo-org/celo-blockchain/ethdb"
"github.com/celo-org/celo-blockchain/log"
"github.com/celo-org/celo-blockchain/metrics"
+ "github.com/celo-org/celo-blockchain/rlp"
"github.com/celo-org/celo-blockchain/trie"
)
@@ -86,6 +87,10 @@ var (
// range of accounts covered.
ErrNotCoveredYet = errors.New("not covered yet")
+ // ErrNotConstructed is returned if the callers want to iterate the snapshot
+ // while the generation is not finished yet.
+ ErrNotConstructed = errors.New("snapshot is not constructed")
+
// errSnapshotCycle is returned if a snapshot is attempted to be inserted
// that forms a cycle in the snapshot tree.
errSnapshotCycle = errors.New("snapshot cycle")
@@ -132,12 +137,19 @@ type snapshot interface {
// flattening everything down (bad for reorgs).
Journal(buffer *bytes.Buffer) (common.Hash, error)
+ // LegacyJournal is basically identical to Journal. It's the legacy version for
+ // flushing a legacy journal. Currently its only purpose is testing.
+ LegacyJournal(buffer *bytes.Buffer) (common.Hash, error)
+
// Stale return whether this layer has become stale (was flattened across) or
// if it's still live.
Stale() bool
// AccountIterator creates an account iterator over an arbitrary layer.
AccountIterator(seek common.Hash) AccountIterator
+
+ // StorageIterator creates a storage iterator over an arbitrary layer.
+ StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
}
// SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
@@ -161,10 +173,12 @@ type Tree struct {
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
-// If the snapshot is missing or inconsistent, the entirety is deleted and will
-// be reconstructed from scratch based on the tries in the key-value store, on a
-// background thread.
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree {
+// If the snapshot is missing or the disk layer is broken, the entire snapshot
+// is deleted and will be reconstructed from scratch based on the tries in the
+// key-value store, on a background thread. If the memory layers in the journal
+// are not continuous with the disk layer, or the journal is missing, all diffs
+// are discarded if we're in "recovery" mode; otherwise a rebuild is mandatory.
+func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, recovery bool) *Tree {
// Create a new, empty snapshot tree
snap := &Tree{
diskdb: diskdb,
@@ -176,7 +190,7 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
defer snap.waitBuild()
}
// Attempt to load a previously persisted snapshot and rebuild one if failed
- head, err := loadSnapshot(diskdb, triedb, cache, root)
+ head, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
if err != nil {
log.Warn("Failed to load snapshot, regenerating", "err", err)
snap.Rebuild(root)
@@ -191,7 +205,7 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
}
// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
-// to be used by tests to ensure we're testing what we believe we are.
+// to be used by tests to ensure we're testing what we believe we are.
func (t *Tree) waitBuild() {
// Find the rebuild termination channel
var done chan struct{}
@@ -233,11 +247,11 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
return errSnapshotCycle
}
// Generate a new snapshot on top of the parent
- parent := t.Snapshot(parentRoot).(snapshot)
+ parent := t.Snapshot(parentRoot)
if parent == nil {
return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
}
- snap := parent.Update(blockRoot, destructs, accounts, storage)
+ snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
// Save the new snapshot for later
t.lock.Lock()
@@ -250,6 +264,12 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers are crossed. All layers beyond the permitted number
// are flattened downwards.
+//
+// Note, the final diff layer count in general will be one more than the amount
+// requested. This happens because the bottom-most diff layer is the accumulator
+// which may or may not overflow and cascade to disk. Since this last layer's
+// survival is only known *after* capping, we need to omit it from the count if
+// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) Cap(root common.Hash, layers int) error {
// Retrieve the head snapshot to cap from
snap := t.Snapshot(root)
@@ -260,6 +280,13 @@ func (t *Tree) Cap(root common.Hash, layers int) error {
if !ok {
return fmt.Errorf("snapshot [%#x] is disk layer", root)
}
+ // If the generator is still running, use a more aggressive cap
+ diff.origin.lock.RLock()
+ if diff.origin.genMarker != nil && layers > 8 {
+ layers = 8
+ }
+ diff.origin.lock.RUnlock()
+
// Run the internal capping and discard all stale layers
t.lock.Lock()
defer t.lock.Unlock()
@@ -267,10 +294,7 @@ func (t *Tree) Cap(root common.Hash, layers int) error {
// Flattening the bottom-most diff layer requires special casing since there's
// no child to rewire to the grandparent. In that case we can fake a temporary
// child for the capping and then remove it.
- var persisted *diskLayer
-
- switch layers {
- case 0:
+ if layers == 0 {
// If full commit was requested, flatten the diffs and merge onto disk
diff.lock.RLock()
base := diffToDisk(diff.flatten().(*diffLayer))
@@ -279,33 +303,9 @@ func (t *Tree) Cap(root common.Hash, layers int) error {
// Replace the entire snapshot tree with the flat base
t.layers = map[common.Hash]snapshot{base.root: base}
return nil
-
- case 1:
- // If full flattening was requested, flatten the diffs but only merge if the
- // memory limit was reached
- var (
- bottom *diffLayer
- base *diskLayer
- )
- diff.lock.RLock()
- bottom = diff.flatten().(*diffLayer)
- if bottom.memory >= aggregatorMemoryLimit {
- base = diffToDisk(bottom)
- }
- diff.lock.RUnlock()
-
- // If all diff layers were removed, replace the entire snapshot tree
- if base != nil {
- t.layers = map[common.Hash]snapshot{base.root: base}
- return nil
- }
- // Merge the new aggregated layer into the snapshot tree, clean stales below
- t.layers[bottom.root] = bottom
-
- default:
- // Many layers requested to be retained, cap normally
- persisted = t.cap(diff, layers)
}
+ persisted := t.cap(diff, layers)
+
// Remove any layer that is stale or links into a stale layer
children := make(map[common.Hash][]common.Hash)
for root, snap := range t.layers {
@@ -347,10 +347,16 @@ func (t *Tree) Cap(root common.Hash, layers int) error {
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
//
-// The method returns the new disk layer if diffs were persistend into it.
+// The method returns the new disk layer if diffs were persisted into it.
+//
+// Note, the final diff layer count in general will be one more than the amount
+// requested. This happens because the bottom-most diff layer is the accumulator
+// which may or may not overflow and cascade to disk. Since this last layer's
+// survival is only known *after* capping, we need to omit it from the count if
+// we want to ensure that *at least* the requested number of diff layers remain.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
// Dive until we run out of layers or reach the persistent database
- for ; layers > 2; layers-- {
+ for i := 0; i < layers-1; i++ {
// If we still have diff layers below, continue down
if parent, ok := diff.parent.(*diffLayer); ok {
diff = parent
@@ -401,6 +407,9 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
// it. The method will panic if called onto a non-bottom-most diff layer.
+//
+// The disk layer persistence should be atomic. All updates should
+// be discarded if the whole transition is not finished.
func diffToDisk(bottom *diffLayer) *diskLayer {
var (
base = bottom.parent.(*diskLayer)
@@ -413,8 +422,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
base.genAbort <- abort
stats = <-abort
}
- // Start by temporarily deleting the current snapshot block marker. This
- // ensures that in the case of a crash, the entire snapshot is invalidated.
+ // Put the deletion in the batch writer, flush all updates in the final step.
rawdb.DeleteSnapshotRoot(batch)
// Mark the original base as stale as we're going to create a new wrapper
@@ -440,8 +448,17 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
batch.Delete(key)
base.cache.Del(key[1:])
-
snapshotFlushStorageItemMeter.Mark(1)
+
+ // Ensure we don't delete too much data blindly (contract can be
+ // huge). It's ok to flush, the root will go missing in case of a
+ // crash and we'll detect and regenerate the snapshot.
+ if batch.ValueSize() > ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write storage deletions", "err", err)
+ }
+ batch.Reset()
+ }
}
}
it.Release()
@@ -457,14 +474,18 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
base.cache.Set(hash[:], data)
snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
+ snapshotFlushAccountItemMeter.Mark(1)
+ snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
+
+ // Ensure we don't write too much data blindly. It's ok to flush, the
+ // root will go missing in case of a crash and we'll detect and regen
+ // the snapshot.
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
- log.Crit("Failed to write account snapshot", "err", err)
+ log.Crit("Failed to write storage deletions", "err", err)
}
batch.Reset()
}
- snapshotFlushAccountItemMeter.Mark(1)
- snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
}
// Push all the storage slots into the database
for accountHash, storage := range bottom.storageData {
@@ -491,18 +512,19 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
snapshotFlushStorageItemMeter.Mark(1)
snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
}
- if batch.ValueSize() > ethdb.IdealBatchSize {
- if err := batch.Write(); err != nil {
- log.Crit("Failed to write storage snapshot", "err", err)
- }
- batch.Reset()
- }
}
// Update the snapshot block marker and write any remainder data
rawdb.WriteSnapshotRoot(batch, bottom.root)
+
+ // Write out the generator progress marker and report
+ journalProgress(batch, base.genMarker, stats)
+
+ // Flush all the updates in a single db operation. Ensure the
+ // disk layer transition is atomic.
if err := batch.Write(); err != nil {
log.Crit("Failed to write leftover snapshot", "err", err)
}
+ log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
res := &diskLayer{
root: bottom.root,
cache: base.cache,
@@ -540,7 +562,21 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
t.lock.Lock()
defer t.lock.Unlock()
+ // Firstly, write out the journal metadata
journal := new(bytes.Buffer)
+ if err := rlp.Encode(journal, journalVersion); err != nil {
+ return common.Hash{}, err
+ }
+ diskroot := t.diskRoot()
+ if diskroot == (common.Hash{}) {
+ return common.Hash{}, errors.New("invalid disk root")
+ }
+ // Secondly, write out the disk layer root, ensuring the
+ // diff journal is continuous with the disk layer.
+ if err := rlp.Encode(journal, diskroot); err != nil {
+ return common.Hash{}, err
+ }
+ // Finally write out the journal of each layer in reverse order.
base, err := snap.(snapshot).Journal(journal)
if err != nil {
return common.Hash{}, err
@@ -550,6 +586,29 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
return base, nil
}
+// LegacyJournal is basically identical to Journal. It's the legacy
+// version for flushing a legacy journal. Currently its only purpose
+// is testing.
+func (t *Tree) LegacyJournal(root common.Hash) (common.Hash, error) {
+ // Retrieve the head snapshot to journal from
+ snap := t.Snapshot(root)
+ if snap == nil {
+ return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
+ }
+ // Run the journaling
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ journal := new(bytes.Buffer)
+ base, err := snap.(snapshot).LegacyJournal(journal)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Store the journal into the database and return
+ rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
+ return base, nil
+}
+
// Rebuild wipes all available snapshot data from the persistent database and
// discard all caches and diff layers. Afterwards, it starts a new snapshot
// generator with the given root hash.
@@ -557,6 +616,10 @@ func (t *Tree) Rebuild(root common.Hash) {
t.lock.Lock()
defer t.lock.Unlock()
+ // Firstly, delete any recovery flag in the database, because we are now
+ // building a brand new snapshot.
+ rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
+
// Track whether there's a wipe currently running and keep it alive if so
var wiper chan struct{}
@@ -588,7 +651,7 @@ func (t *Tree) Rebuild(root common.Hash) {
panic(fmt.Sprintf("unknown layer type: %T", layer))
}
}
- // Start generating a new snapshot from scratch on a backgroung thread. The
+ // Start generating a new snapshot from scratch on a background thread. The
// generator will run a wiper first if there's not one running right now.
log.Info("Rebuilding state snapshot")
t.layers = map[common.Hash]snapshot{
@@ -599,5 +662,79 @@ func (t *Tree) Rebuild(root common.Hash) {
// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
+ ok, err := t.generating()
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ return nil, ErrNotConstructed
+ }
return newFastAccountIterator(t, root, seek)
}
+
+// StorageIterator creates a new storage iterator for the specified root hash and
+// account. The iterator will be moved to the specified start position.
+func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ ok, err := t.generating()
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ return nil, ErrNotConstructed
+ }
+ return newFastStorageIterator(t, root, account, seek)
+}
+
+// disklayer is an internal helper function to return the disk layer.
+// The lock of snapTree is assumed to be held already.
+func (t *Tree) disklayer() *diskLayer {
+ var snap snapshot
+ for _, s := range t.layers {
+ snap = s
+ break
+ }
+ if snap == nil {
+ return nil
+ }
+ switch layer := snap.(type) {
+ case *diskLayer:
+ return layer
+ case *diffLayer:
+ return layer.origin
+ default:
+ panic(fmt.Sprintf("%T: undefined layer", snap))
+ }
+}
+
+// diskRoot is an internal helper function to return the disk layer root.
+// The lock of snapTree is assumed to be held already.
+func (t *Tree) diskRoot() common.Hash {
+ disklayer := t.disklayer()
+ if disklayer == nil {
+ return common.Hash{}
+ }
+ return disklayer.Root()
+}
+
+// generating is an internal helper function which reports whether the snapshot
+// is still under construction.
+func (t *Tree) generating() (bool, error) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ layer := t.disklayer()
+ if layer == nil {
+ return false, errors.New("disk layer is missing")
+ }
+ layer.lock.RLock()
+ defer layer.lock.RUnlock()
+ return layer.genMarker != nil, nil
+}
+
+// DiskRoot is an external helper function to return the disk layer root.
+func (t *Tree) DiskRoot() common.Hash {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ return t.diskRoot()
+}
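The generating() helper above is what backs ErrNotConstructed: iterators are refused while the disk layer generator is still running, since the snapshot is not yet complete. A simplified, self-contained sketch of that guard pattern, with stand-in types rather than the real Tree or diskLayer:

package main

import (
	"errors"
	"fmt"
)

var errNotConstructed = errors.New("snapshot is not constructed")

// diskLayer stands in for the real disk layer: genMarker is non-nil while
// background generation is still in progress.
type diskLayer struct {
	genMarker []byte
}

type tree struct {
	disk *diskLayer
}

// accountIterator mirrors the guard: only hand out an iterator once the
// generator has finished (genMarker == nil).
func (t *tree) accountIterator() (func() bool, error) {
	if t.disk.genMarker != nil {
		return nil, errNotConstructed
	}
	done := false
	return func() bool { // trivial stand-in for a real iterator's Next()
		if done {
			return false
		}
		done = true
		return true
	}, nil
}

func main() {
	t := &tree{disk: &diskLayer{genMarker: []byte{0x00}}}
	if _, err := t.accountIterator(); err != nil {
		fmt.Println("while generating:", err)
	}
	t.disk.genMarker = nil // generation finished
	if it, err := t.accountIterator(); err == nil {
		fmt.Println("after generation, first Next():", it())
	}
}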
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index f6d3411f3c..88a0e0cd4e 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -60,6 +60,29 @@ func randomAccountSet(hashes ...string) map[common.Hash][]byte {
return accounts
}
+// randomStorageSet generates a set of random storage slots for the given
+// accounts, using the given strings as the slot keys. Keys listed in
+// nilStorage are mapped to nil (i.e. deleted slots).
+func randomStorageSet(accounts []string, hashes [][]string, nilStorage [][]string) map[common.Hash]map[common.Hash][]byte {
+ storages := make(map[common.Hash]map[common.Hash][]byte)
+ for index, account := range accounts {
+ storages[common.HexToHash(account)] = make(map[common.Hash][]byte)
+
+ if index < len(hashes) {
+ hashes := hashes[index]
+ for _, hash := range hashes {
+ storages[common.HexToHash(account)][common.HexToHash(hash)] = randomHash().Bytes()
+ }
+ }
+ if index < len(nilStorage) {
+ nils := nilStorage[index]
+ for _, hash := range nils {
+ storages[common.HexToHash(account)][common.HexToHash(hash)] = nil
+ }
+ }
+ }
+ return storages
+}
+
// Tests that if a disk layer becomes stale, no active external references will
// be returned with junk data. This version of the test flattens every diff layer
// to check internal corner case around the bottom-most memory accumulator.
@@ -138,57 +161,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit)
aggregatorMemoryLimit = 0
- if err := snaps.Cap(common.HexToHash("0x03"), 2); err != nil {
- t.Fatalf("failed to merge diff layer onto disk: %v", err)
- }
- // Since the base layer was modified, ensure that data retrievald on the external reference fail
- if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
- }
- if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale {
- t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err)
- }
- if n := len(snaps.layers); n != 2 {
- t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2)
- fmt.Println(snaps.layers)
- }
-}
-
-// Tests that if a diff layer becomes stale, no active external references will
-// be returned with junk data. This version of the test flattens every diff layer
-// to check internal corner case around the bottom-most memory accumulator.
-func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
- // Create an empty base layer and a snapshot tree out of it
- base := &diskLayer{
- diskdb: rawdb.NewMemoryDatabase(),
- root: common.HexToHash("0x01"),
- cache: fastcache.New(1024 * 500),
- }
- snaps := &Tree{
- layers: map[common.Hash]snapshot{
- base.root: base,
- },
- }
- // Commit two diffs on top and retrieve a reference to the bottommost
- accounts := map[common.Hash][]byte{
- common.HexToHash("0xa1"): randomAccount(),
- }
- if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
- t.Fatalf("failed to create a diff layer: %v", err)
- }
- if n := len(snaps.layers); n != 3 {
- t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3)
- }
- ref := snaps.Snapshot(common.HexToHash("0x02"))
-
- // Flatten the diff layer into the bottom accumulator
if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil {
- t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
+ t.Fatalf("failed to merge accumulator onto disk: %v", err)
}
- // Since the accumulator diff layer was modified, ensure that data retrievald on the external reference fail
+ // Since the base layer was modified, ensure that data retrievals on the external reference fail
if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
}
@@ -243,7 +219,7 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
t.Errorf("layers modified, got %d exp %d", got, exp)
}
// Flatten the diff layer into the bottom accumulator
- if err := snaps.Cap(common.HexToHash("0x04"), 2); err != nil {
+ if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil {
t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
}
// Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 609138d625..fa6bb387cc 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -472,7 +472,7 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
// enough to track account updates at commit time, deletions need tracking
// at transaction boundary level to ensure we capture state clearing.
if s.snap != nil {
- s.snapAccounts[obj.addrHash] = snapshot.AccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
}
}
@@ -691,6 +691,31 @@ func (s *StateDB) Copy() *StateDB {
for hash, preimage := range s.preimages {
state.preimages[hash] = preimage
}
+ if s.snaps != nil {
+ // In order for the miner to be able to use and make additions
+ // to the snapshot tree, we need to copy that as well.
+ // Otherwise, any block mined by ourselves will cause gaps in the tree,
+ // and force the miner to operate trie-backed only.
+ state.snaps = s.snaps
+ state.snap = s.snap
+ // deep copy needed
+ state.snapDestructs = make(map[common.Hash]struct{})
+ for k, v := range s.snapDestructs {
+ state.snapDestructs[k] = v
+ }
+ state.snapAccounts = make(map[common.Hash][]byte)
+ for k, v := range s.snapAccounts {
+ state.snapAccounts[k] = v
+ }
+ state.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
+ for k, v := range s.snapStorage {
+ temp := make(map[common.Hash][]byte)
+ for kk, vv := range v {
+ temp[kk] = vv
+ }
+ state.snapStorage[k] = temp
+ }
+ }
return state
}
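
The block added above deep-copies the per-block snapshot write-sets (snapDestructs, snapAccounts, snapStorage) so that a copied StateDB handed to the miner never shares mutable maps with the original; only the maps are duplicated, the byte slices they point at stay shared. A minimal, self-contained sketch of that nested-map copy, using string keys instead of common.Hash purely for brevity:

    package main

    import "fmt"

    // copyStorage duplicates a two-level map so mutations on the copy never
    // leak back into the source, mirroring the snapStorage copy above.
    func copyStorage(src map[string]map[string][]byte) map[string]map[string][]byte {
        dst := make(map[string]map[string][]byte, len(src))
        for acc, slots := range src {
            inner := make(map[string][]byte, len(slots))
            for slot, val := range slots {
                inner[slot] = val // the value slices themselves stay shared
            }
            dst[acc] = inner
        }
        return dst
    }

    func main() {
        src := map[string]map[string][]byte{"acc1": {"slot1": {0x01}}}
        cpy := copyStorage(src)
        cpy["acc1"]["slot2"] = []byte{0x02}
        fmt.Println(len(src["acc1"]), len(cpy["acc1"])) // prints: 1 2
    }
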
@@ -858,8 +883,12 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
}
- if err := s.snaps.Cap(root, 127); err != nil { // Persistent layer is 128th, the last available trie
- log.Warn("Failed to cap snapshot tree", "root", root, "layers", 127, "err", err)
+ // Keep 128 diff layers in memory; the persistent layer is the 129th.
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - head-127 layer (bottom-most diff layer) is paired with HEAD-127 state
+ if err := s.snaps.Cap(root, 128); err != nil {
+ log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
}
}
s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
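
With the cap raised from 127 to 128, the tree keeps one diff layer per recent block (head down to head-127) and the persistent disk layer underneath as the 129th entry. A tiny illustration of that pairing between retained layers and block heights (the head number is made up, not from this diff):

    package main

    import "fmt"

    func main() {
        const diffLayers = 128            // diff layers kept in memory
        head := uint64(12_000_000)        // hypothetical head block
        oldest := head - (diffLayers - 1) // head-127 pairs with the bottom-most diff layer
        fmt.Printf("diff layers serve states for blocks %d..%d; older states come from the disk layer\n", oldest, head)
    }
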
diff --git a/eth/config.go b/eth/config.go
index c1c38fcd9e..bfb90413d4 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -35,11 +35,11 @@ var DefaultConfig = Config{
LightPeers: 100,
LightServ: 0,
UltraLightFraction: 75,
- DatabaseCache: 768,
- TrieCleanCache: 256,
+ DatabaseCache: 512,
+ TrieCleanCache: 154,
TrieDirtyCache: 256,
TrieTimeout: 60 * time.Minute,
- SnapshotCache: 256,
+ SnapshotCache: 102,
Miner: miner.Config{
GasFloor: 8000000,
GasCeil: 8000000,
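
The rebalanced defaults keep the overall budget at 1024 MB (512 + 154 + 256 + 102) while carving part of the former clean-trie allowance out for the new snapshot cache. A rough sketch of the split, assuming a 50/15/25/10 percentage breakdown between database, clean trie, dirty trie and snapshot caches (the percentages are an illustrative assumption, not part of this diff):

    package main

    import "fmt"

    func main() {
        total := 1024 // MB, the sum of the four defaults above
        fmt.Println(total*50/100, total*15/100, total*25/100, total*10/100)
        // Prints 512 153 256 102 -- within 1 MB of the new 512/154/256/102 defaults.
    }
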
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index 5d5ca7ab89..26074d7662 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -125,7 +125,7 @@ func TestPrestateTracerCreate2(t *testing.T) {
Code: []byte{},
Balance: big.NewInt(500000000000000),
}
- statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
+ _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
// Create the tracer, the EVM environment and run it
tracer, err := New("prestateTracer")
@@ -202,7 +202,7 @@ func TestCallTracer(t *testing.T) {
GasLimit: uint64(test.Context.GasLimit),
GasPrice: tx.GasPrice(),
}
- statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
// Create the tracer, the EVM environment and run it
tracer, err := New("callTracer")
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index c3834c4b54..e92bac2d2f 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -428,7 +428,7 @@ func (b *batch) Put(key, value []byte) error {
// Delete inserts the a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
b.b.Delete(key)
- b.size++
+ b.size += len(key)
return nil
}
diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go
index 8e9cf3c70b..338dd1aeb5 100644
--- a/ethdb/memorydb/memorydb.go
+++ b/ethdb/memorydb/memorydb.go
@@ -211,7 +211,7 @@ func (b *batch) Put(key, value []byte) error {
// Delete inserts the a key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
b.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true})
- b.size += 1
+ b.size += len(key)
return nil
}
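
Both batch implementations previously charged a deletion as a single byte; they now charge the full key length, so the reported batch size tracks the data actually queued and callers that flush once a size threshold is reached do so at sensible points. A minimal sketch of the accounting difference (the 64-byte keys and the loop count are arbitrary):

    package main

    import "fmt"

    // batch mimics only the size bookkeeping of the Delete methods above.
    type batch struct{ size int }

    func (b *batch) deleteOld(key []byte) { b.size++ }           // previous accounting
    func (b *batch) deleteNew(key []byte) { b.size += len(key) } // patched accounting

    func main() {
        before, after := &batch{}, &batch{}
        for i := 0; i < 1000; i++ {
            key := make([]byte, 64) // e.g. prefixed hash keys queued for pruning
            before.deleteOld(key)
            after.deleteNew(key)
        }
        fmt.Println(before.size, after.size) // prints: 1000 64000
    }
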
diff --git a/go.sum b/go.sum
index 7acac49d52..d0a0a29d24 100644
--- a/go.sum
+++ b/go.sum
@@ -248,6 +248,10 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 65c0832e7b..49ed55e63e 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -133,15 +133,8 @@ func (t *BlockTest) Run(snapshotter bool) error {
}
// Cross-check the snapshot-to-hash against the trie hash
if snapshotter {
- snapTree := chain.Snapshot()
- root := chain.CurrentBlock().Root()
- it, err := snapTree.AccountIterator(root, common.Hash{})
- if err != nil {
- return fmt.Errorf("Could not create iterator for root %x: %v", root, err)
- }
- generatedRoot := snapshot.GenerateTrieRoot(it)
- if generatedRoot != root {
- return fmt.Errorf("Snapshot corruption, got %d exp %d", generatedRoot, root)
+ if err := snapshot.VerifyState(chain.Snapshot(), chain.CurrentBlock().Root()); err != nil {
+ return err
}
}
return t.validateImportedHeaders(chain, validBlocks)
diff --git a/tests/state_test.go b/tests/state_test.go
index 118c7b23a4..9ba1eb5e86 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -66,13 +66,16 @@ func TestState(t *testing.T) {
t.Run(key+"/trie", func(t *testing.T) {
withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- _, err := test.Run(subtest, vmconfig, false)
+ _, _, err := test.Run(subtest, vmconfig, false)
return st.checkFailure(t, name+"/trie", err)
})
})
t.Run(key+"/snap", func(t *testing.T) {
withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- _, err := test.Run(subtest, vmconfig, true)
+ snaps, statedb, err := test.Run(subtest, vmconfig, true)
+ if _, err := snaps.Journal(statedb.IntermediateRoot(false)); err != nil {
+ return err
+ }
return st.checkFailure(t, name+"/snap", err)
})
})
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index d69dcc8314..e4b06472f3 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -145,37 +145,37 @@ func (t *StateTest) Subtests() []StateSubtest {
}
// Run executes a specific subtest and verifies the post-state and logs
-func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, error) {
- statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
+func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, error) {
+ snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
if err != nil {
- return statedb, err
+ return snaps, statedb, err
}
post := t.json.Post[subtest.Fork][subtest.Index]
// N.B: We need to do this in a two-step process, because the first Commit takes care
// of suicides, and we need to touch the coinbase _after_ it has potentially suicided.
if root != common.Hash(post.Root) {
- return statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
+ return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
}
if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) {
- return statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
+ return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
- return statedb, nil
+ return snaps, statedb, nil
}
// RunNoVerify runs a specific subtest and returns the statedb and post-state root
-func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, common.Hash, error) {
+func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, common.Hash, error) {
config, eips, err := getVMConfig(subtest.Fork)
if err != nil {
- return nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
+ return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
block := t.genesis(config).ToBlock(nil)
- statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
+ snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
post := t.json.Post[subtest.Fork][subtest.Index]
msg, err := t.json.Tx.toMessage(post)
if err != nil {
- return nil, common.Hash{}, err
+ return nil, nil, common.Hash{}, err
}
context := vm.NewEVMContext(msg, block.Header(), nil, &t.json.Env.Coinbase)
context.GetHash = vmTestBlockHash
@@ -197,14 +197,14 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
statedb.AddBalance(block.Coinbase(), new(big.Int))
// And _now_ get the state root
root := statedb.IntermediateRoot(config.IsEIP158(block.Number()))
- return statedb, root, nil
+ return snaps, statedb, root, nil
}
func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
}
-func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) *state.StateDB {
+func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {
sdb := state.NewDatabase(db)
statedb, _ := state.New(common.Hash{}, sdb, nil)
for addr, a := range accounts {
@@ -220,10 +220,10 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
var snaps *snapshot.Tree
if snapshotter {
- snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false)
+ snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false, false)
}
statedb, _ = state.New(root, sdb, snaps)
- return statedb
+ return snaps, statedb
}
func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go
index 4f40ffed4b..4ae5c4f745 100644
--- a/tests/vm_test_util.go
+++ b/tests/vm_test_util.go
@@ -79,7 +79,15 @@ type vmExecMarshaling struct {
}
func (t *VMTest) Run(vmconfig vm.Config, snapshotter bool) error {
- statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
+ snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
+ if snapshotter {
+ preRoot := statedb.IntermediateRoot(false)
+ defer func() {
+ if _, err := snaps.Journal(preRoot); err != nil {
+ panic(err)
+ }
+ }()
+ }
ret, gasRemaining, err := t.exec(statedb, vmconfig)
if t.json.GasRemaining == nil {
diff --git a/trie/database.go b/trie/database.go
index fae5cf491e..321c62cc35 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -747,7 +747,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
batch.Replay(uncacher)
batch.Reset()
- // Reset the storage counters and bumpd metrics
+ // Reset the storage counters and bump the metrics
db.preimages = make(map[common.Hash][]byte)
db.preimagesSize = 0