From 50e44b0d2c0f8d34bc341a538157eb81e2f4e399 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Fri, 6 Dec 2024 15:29:57 -0800 Subject: [PATCH 01/17] Fixed prefetch metric and intialized all point load metrics on startup --- src/bucket/BucketSnapshotManager.cpp | 21 +++++++++++++-------- src/bucket/SearchableBucketList.cpp | 11 +++++++++++ 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/src/bucket/BucketSnapshotManager.cpp b/src/bucket/BucketSnapshotManager.cpp index 0cad98addf..9d9918cd16 100644 --- a/src/bucket/BucketSnapshotManager.cpp +++ b/src/bucket/BucketSnapshotManager.cpp @@ -13,7 +13,9 @@ #include "medida/meter.h" #include "medida/metrics_registry.h" +#include "xdr/Stellar-ledger-entries.h" #include +#include namespace stellar { @@ -38,6 +40,16 @@ BucketSnapshotManager::BucketSnapshotManager( releaseAssert(threadIsMain()); releaseAssert(mCurrLiveSnapshot); releaseAssert(mCurrHotArchiveSnapshot); + + // Initialize point load timers for each LedgerEntry type + for (auto t : xdr::xdr_traits::enum_values()) + { + auto const& label = xdr::xdr_traits::enum_name( + static_cast(t)); + auto& metric = + mApp.getMetrics().NewTimer({"bucketlistDB", "point", label}); + mPointTimers.emplace(static_cast(t), metric); + } } std::shared_ptr @@ -215,14 +227,7 @@ BucketSnapshotManager::endPointLoadTimer(LedgerEntryType t, if (!bloomMiss) { auto iter = mPointTimers.find(t); - if (iter == mPointTimers.end()) - { - auto const& label = xdr::xdr_traits::enum_name(t); - auto& metric = - mApp.getMetrics().NewTimer({"bucketlistDB", "point", label}); - iter = mPointTimers.emplace(t, metric).first; - } - + releaseAssert(iter != mPointTimers.end()); iter->second.Update(duration); } } diff --git a/src/bucket/SearchableBucketList.cpp b/src/bucket/SearchableBucketList.cpp index dd49a424c0..15b6e4792b 100644 --- a/src/bucket/SearchableBucketList.cpp +++ b/src/bucket/SearchableBucketList.cpp @@ -5,6 +5,7 @@ #include "bucket/SearchableBucketList.h" #include 
"bucket/BucketInputIterator.h" #include "bucket/BucketListSnapshotBase.h" +#include "util/GlobalChecks.h" #include @@ -244,6 +245,16 @@ SearchableLiveBucketListSnapshot::loadKeysWithLimits( std::set const& inKeys, LedgerKeyMeter* lkMeter) { + if (threadIsMain()) + { + auto timer = + mSnapshotManager.recordBulkLoadMetrics("prefetch", inKeys.size()) + .TimeScope(); + auto op = loadKeysInternal(inKeys, lkMeter, std::nullopt); + releaseAssertOrThrow(op); + return std::move(*op); + } + auto op = loadKeysInternal(inKeys, lkMeter, std::nullopt); releaseAssertOrThrow(op); return std::move(*op); From 9666c91daa71fc477579546dcf65f07a31a1e0a1 Mon Sep 17 00:00:00 2001 From: Dmytro Kozhevin Date: Fri, 8 Nov 2024 15:55:03 -0500 Subject: [PATCH 02/17] Apply-load benchmark updates: - Use the new function that doesn't access storage, but does emit events - Generate synthetic bucket list with configurable number of entries/ledgers - Tighten the resources estimation - Wait for merges in-between benchmark runs --- Builds/VisualStudio/stellar-core.vcxproj | 27 +- .../VisualStudio/stellar-core.vcxproj.filters | 81 +++-- Cargo.lock | 11 +- src/bucket/BucketIndexImpl.cpp | 2 +- src/history/HistoryManagerImpl.cpp | 14 +- src/ledger/LedgerManager.h | 1 + src/ledger/LedgerManagerImpl.cpp | 6 + src/ledger/LedgerManagerImpl.h | 1 + src/ledger/SorobanMetrics.cpp | 2 + src/ledger/SorobanMetrics.h | 1 + src/main/CommandLine.cpp | 277 ++++++------------ src/main/Config.cpp | 136 ++++++++- src/main/Config.h | 62 +++- src/rust/Cargo.toml | 6 +- src/simulation/ApplyLoad.cpp | 266 +++++++++++++---- src/simulation/ApplyLoad.h | 19 +- src/simulation/TxGenerator.cpp | 199 +++++++++++-- src/simulation/TxGenerator.h | 16 +- src/simulation/test/LoadGeneratorTests.cpp | 73 +++-- .../InvokeHostFunctionOpFrame.cpp | 5 + 20 files changed, 848 insertions(+), 357 deletions(-) diff --git a/Builds/VisualStudio/stellar-core.vcxproj b/Builds/VisualStudio/stellar-core.vcxproj index 881523c1fc..d7641cdd7e 100644 --- 
a/Builds/VisualStudio/stellar-core.vcxproj +++ b/Builds/VisualStudio/stellar-core.vcxproj @@ -481,20 +481,26 @@ exit /b 0 - + - - - + + + + + + + + + @@ -942,23 +948,28 @@ exit /b 0 - + - - + + - + + + + + + diff --git a/Builds/VisualStudio/stellar-core.vcxproj.filters b/Builds/VisualStudio/stellar-core.vcxproj.filters index 8475c23188..4ef8433844 100644 --- a/Builds/VisualStudio/stellar-core.vcxproj.filters +++ b/Builds/VisualStudio/stellar-core.vcxproj.filters @@ -576,9 +576,6 @@ bucket\tests - - bucket - bucket @@ -588,12 +585,6 @@ bucket - - bucket - - - bucket - bucket @@ -1323,9 +1314,6 @@ main - - bucket - bucket @@ -1383,6 +1371,36 @@ history + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + @@ -1796,9 +1814,6 @@ bucket\tests - - bucket - bucket @@ -1811,15 +1826,9 @@ bucket - - bucket - bucket - - bucket - bucket @@ -2357,9 +2366,6 @@ main - - bucket - bucket @@ -2416,6 +2422,33 @@ history + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + + + bucket + diff --git a/Cargo.lock b/Cargo.lock index e948c5074c..74880f7a7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -472,7 +472,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "soroban-env-common" version = "22.0.0" -source = "git+https://github.com/stellar/rs-soroban-env?rev=0497816694bef2b103494c8c61b7c8a06a72c7d3#0497816694bef2b103494c8c61b7c8a06a72c7d3" +source = "git+https://github.com/stellar/rs-soroban-env?rev=a3f7fca9c2ad89796c7525a648da086543502dd5#a3f7fca9c2ad89796c7525a648da086543502dd5" dependencies = [ "crate-git-revision", "ethnum", @@ -486,7 +486,7 @@ dependencies = [ [[package]] name = "soroban-env-macros" version = "22.0.0" -source = "git+https://github.com/stellar/rs-soroban-env?rev=0497816694bef2b103494c8c61b7c8a06a72c7d3#0497816694bef2b103494c8c61b7c8a06a72c7d3" +source = 
"git+https://github.com/stellar/rs-soroban-env?rev=a3f7fca9c2ad89796c7525a648da086543502dd5#a3f7fca9c2ad89796c7525a648da086543502dd5" dependencies = [ "itertools", "proc-macro2", @@ -500,7 +500,7 @@ dependencies = [ [[package]] name = "soroban-synth-wasm" version = "22.0.0" -source = "git+https://github.com/stellar/rs-soroban-env?rev=0497816694bef2b103494c8c61b7c8a06a72c7d3#0497816694bef2b103494c8c61b7c8a06a72c7d3" +source = "git+https://github.com/stellar/rs-soroban-env?rev=a3f7fca9c2ad89796c7525a648da086543502dd5#a3f7fca9c2ad89796c7525a648da086543502dd5" dependencies = [ "arbitrary", "soroban-env-common", @@ -513,7 +513,7 @@ dependencies = [ [[package]] name = "soroban-test-wasms" version = "22.0.0" -source = "git+https://github.com/stellar/rs-soroban-env?rev=0497816694bef2b103494c8c61b7c8a06a72c7d3#0497816694bef2b103494c8c61b7c8a06a72c7d3" +source = "git+https://github.com/stellar/rs-soroban-env?rev=a3f7fca9c2ad89796c7525a648da086543502dd5#a3f7fca9c2ad89796c7525a648da086543502dd5" [[package]] name = "static_assertions" @@ -550,7 +550,8 @@ dependencies = [ [[package]] name = "stellar-xdr" version = "22.0.0" -source = "git+https://github.com/stellar/rs-stellar-xdr?rev=b5516843b6379e4e29520bf2ba156484f62edc46#b5516843b6379e4e29520bf2ba156484f62edc46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20c2130275cc730d042b3082f51145f0486f5a543d6d72fced02ed9048b82b57" dependencies = [ "crate-git-revision", "escape-bytes", diff --git a/src/bucket/BucketIndexImpl.cpp b/src/bucket/BucketIndexImpl.cpp index 2255785f3d..38de02875c 100644 --- a/src/bucket/BucketIndexImpl.cpp +++ b/src/bucket/BucketIndexImpl.cpp @@ -272,7 +272,7 @@ BucketIndexImpl::saveToDisk( { OutputFileStream out(ctx, !bm.getConfig().DISABLE_XDR_FSYNC); - out.open(tmpFilename); + out.open(tmpFilename.string()); cereal::BufferedAsioOutputArchive ar(out); ar(mData); } diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp index 59c3d805ff..ed126b4d3d 
100644 --- a/src/history/HistoryManagerImpl.cpp +++ b/src/history/HistoryManagerImpl.cpp @@ -560,13 +560,13 @@ HistoryManagerImpl::getPublishQueueStates() { ZoneScoped; std::vector states; - forEveryQueuedCheckpoint(publishQueuePath(mApp.getConfig()).string(), - [&](uint32_t seq, std::string const& f) { - HistoryArchiveState has; - auto fullPath = - publishQueuePath(mApp.getConfig()) / f; - states.push_back(loadCheckpointHAS(fullPath)); - }); + forEveryQueuedCheckpoint( + publishQueuePath(mApp.getConfig()).string(), + [&](uint32_t seq, std::string const& f) { + HistoryArchiveState has; + auto fullPath = publishQueuePath(mApp.getConfig()) / f; + states.push_back(loadCheckpointHAS(fullPath.string())); + }); return states; } diff --git a/src/ledger/LedgerManager.h b/src/ledger/LedgerManager.h index acf6e1ee62..e375f9d92e 100644 --- a/src/ledger/LedgerManager.h +++ b/src/ledger/LedgerManager.h @@ -135,6 +135,7 @@ class LedgerManager virtual SorobanNetworkConfig& getMutableSorobanNetworkConfig() = 0; virtual std::vector const& getLastClosedLedgerTxMeta() = 0; + virtual void storeCurrentLedgerForTest(LedgerHeader const& header) = 0; #endif // Return the (changing) number of seconds since the LCL closed. 
diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index b562fe8c6f..3a393eaf4a 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -580,6 +580,12 @@ LedgerManagerImpl::getLastClosedLedgerTxMeta() { return mLastLedgerTxMeta; } + +void +LedgerManagerImpl::storeCurrentLedgerForTest(LedgerHeader const& header) +{ + storeCurrentLedger(header, true, true); +} #endif SorobanMetrics& diff --git a/src/ledger/LedgerManagerImpl.h b/src/ledger/LedgerManagerImpl.h index 61caaf5490..27c55682ca 100644 --- a/src/ledger/LedgerManagerImpl.h +++ b/src/ledger/LedgerManagerImpl.h @@ -166,6 +166,7 @@ class LedgerManagerImpl : public LedgerManager std::vector const& getLastClosedLedgerTxMeta() override; TransactionResultSet mLatestTxResultSet{}; + void storeCurrentLedgerForTest(LedgerHeader const& header) override; #endif uint64_t secondsSinceLastLedgerClose() const override; diff --git a/src/ledger/SorobanMetrics.cpp b/src/ledger/SorobanMetrics.cpp index 36753d94fe..784d922551 100644 --- a/src/ledger/SorobanMetrics.cpp +++ b/src/ledger/SorobanMetrics.cpp @@ -132,6 +132,8 @@ SorobanMetrics::SorobanMetrics(medida::MetricsRegistry& metrics) {"soroban", "host-fn-op", "ledger-cpu-insns-ratio"})) , mLedgerHostFnCpuInsnsRatioExclVm(metrics.NewHistogram( {"soroban", "host-fn-op", "ledger-cpu-insns-ratio-excl-vm"})) + , mHostFnOpDeclaredInsnsUsageRatio(metrics.NewHistogram( + {"soroban", "host-fn-op", "declared-cpu-insns-usage-ratio"})) { } diff --git a/src/ledger/SorobanMetrics.h b/src/ledger/SorobanMetrics.h index d0f2c84570..1e84227dd4 100644 --- a/src/ledger/SorobanMetrics.h +++ b/src/ledger/SorobanMetrics.h @@ -71,6 +71,7 @@ class SorobanMetrics medida::Timer& mHostFnOpInvokeTimeNsecsExclVm; medida::Histogram& mHostFnOpInvokeTimeFsecsCpuInsnRatio; medida::Histogram& mHostFnOpInvokeTimeFsecsCpuInsnRatioExclVm; + medida::Histogram& mHostFnOpDeclaredInsnsUsageRatio; medida::Meter& mHostFnOpMaxRwKeyByte; medida::Meter& 
mHostFnOpMaxRwDataByte; medida::Meter& mHostFnOpMaxRwCodeByte; diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp index 0fa277417c..f16ba5dfc7 100644 --- a/src/main/CommandLine.cpp +++ b/src/main/CommandLine.cpp @@ -1822,199 +1822,116 @@ runApplyLoad(CommandLineArgs const& args) { CommandLine::ConfigOption configOption; - uint64_t ledgerMaxInstructions = 0; - uint64_t ledgerMaxReadLedgerEntries = 0; - uint64_t ledgerMaxReadBytes = 0; - uint64_t ledgerMaxWriteLedgerEntries = 0; - uint64_t ledgerMaxWriteBytes = 0; - uint64_t ledgerMaxTxCount = 0; - uint64_t ledgerMaxTransactionsSizeBytes = 0; - - ParserWithValidation ledgerMaxInstructionsParser{ - clara::Opt(ledgerMaxInstructions, - "LedgerMaxInstructions")["--ledger-max-instructions"] - .required(), - [&] { - return ledgerMaxInstructions > 0 - ? "" - : "ledgerMaxInstructions must be > 0"; - }}; - - ParserWithValidation ledgerMaxReadLedgerEntriesParser{ - clara::Opt(ledgerMaxReadLedgerEntries, - "LedgerMaxReadLedgerEntries")["--ledger-max-read-entries"] - .required(), - [&] { - return ledgerMaxReadLedgerEntries > 0 - ? "" - : "ledgerMaxReadLedgerEntries must be > 0"; - }}; - - ParserWithValidation ledgerMaxReadBytesParser{ - clara::Opt(ledgerMaxReadBytes, - "LedgerMaxReadBytes")["--ledger-max-read-bytes"] - .required(), - [&] { - return ledgerMaxReadBytes > 0 ? "" - : "ledgerMaxReadBytes must be > 0"; - }}; - - ParserWithValidation ledgerMaxWriteLedgerEntriesParser{ - clara::Opt(ledgerMaxWriteLedgerEntries, - "LedgerMaxWriteLedgerEntries")["--ledger-max-write-entries"] - .required(), - [&] { - return ledgerMaxWriteLedgerEntries > 0 - ? "" - : "ledgerMaxWriteLedgerEntries must be > 0"; - }}; - - ParserWithValidation ledgerMaxWriteBytesParser{ - clara::Opt(ledgerMaxWriteBytes, - "LedgerMaxWriteBytes")["--ledger-max-write-bytes"] - .required(), - [&] { - return ledgerMaxWriteBytes > 0 ? 
"" - : "ledgerMaxWriteBytes must be > 0"; - }}; - - ParserWithValidation ledgerMaxTxCountParser{ - clara::Opt(ledgerMaxTxCount, - "LedgerMaxTxCount")["--ledger-max-tx-count"] - .required(), - [&] { - return ledgerMaxTxCount > 0 ? "" : "ledgerMaxTxCount must be > 0"; - }}; + return runWithHelp(args, {configurationParser(configOption)}, [&] { + auto config = configOption.getConfig(); + config.RUN_STANDALONE = true; + config.MANUAL_CLOSE = true; + config.USE_CONFIG_FOR_GENESIS = true; + config.TESTING_UPGRADE_MAX_TX_SET_SIZE = 1000; + config.LEDGER_PROTOCOL_VERSION = + Config::CURRENT_LEDGER_PROTOCOL_VERSION; - ParserWithValidation ledgerMaxTransactionsSizeBytesParser{ - clara::Opt(ledgerMaxTransactionsSizeBytes, - "LedgerMaxTransactionsSizeBytes")["--ledger-max-tx-size"] - .required(), - [&] { - return ledgerMaxTransactionsSizeBytes > 0 - ? "" - : "ledgerMaxTransactionsSizeBytes must be > 0"; - }}; + TmpDirManager tdm(std::string("soroban-storage-meta-")); + TmpDir td = tdm.tmpDir("soroban-meta-ok"); + std::string metaPath = td.getName() + "/stream.xdr"; - return runWithHelp( - args, - {configurationParser(configOption), ledgerMaxInstructionsParser, - ledgerMaxReadLedgerEntriesParser, ledgerMaxReadBytesParser, - ledgerMaxWriteLedgerEntriesParser, ledgerMaxWriteBytesParser, - ledgerMaxTxCountParser, ledgerMaxTransactionsSizeBytesParser}, - [&] { - auto config = configOption.getConfig(); - config.RUN_STANDALONE = true; - config.MANUAL_CLOSE = true; - config.USE_CONFIG_FOR_GENESIS = true; - config.TESTING_UPGRADE_MAX_TX_SET_SIZE = 1000; - config.LEDGER_PROTOCOL_VERSION = - Config::CURRENT_LEDGER_PROTOCOL_VERSION; + config.METADATA_OUTPUT_STREAM = metaPath; - VirtualClock clock(VirtualClock::REAL_TIME); - auto appPtr = Application::create(clock, config); - - auto& app = *appPtr; - { - app.start(); + VirtualClock clock(VirtualClock::REAL_TIME); + auto appPtr = Application::create(clock, config); - ApplyLoad al(app, ledgerMaxInstructions, - ledgerMaxReadLedgerEntries, 
ledgerMaxReadBytes, - ledgerMaxWriteLedgerEntries, ledgerMaxWriteBytes, - ledgerMaxTxCount, ledgerMaxTransactionsSizeBytes); + auto& app = *appPtr; + { + app.start(); - auto& ledgerClose = - app.getMetrics().NewTimer({"ledger", "ledger", "close"}); - ledgerClose.Clear(); + ApplyLoad al(app); - auto& cpuInsRatio = app.getMetrics().NewHistogram( - {"soroban", "host-fn-op", - "invoke-time-fsecs-cpu-insn-ratio"}); - cpuInsRatio.Clear(); + auto& ledgerClose = + app.getMetrics().NewTimer({"ledger", "ledger", "close"}); + ledgerClose.Clear(); - auto& cpuInsRatioExclVm = app.getMetrics().NewHistogram( - {"soroban", "host-fn-op", - "invoke-time-fsecs-cpu-insn-ratio-excl-vm"}); - cpuInsRatioExclVm.Clear(); + auto& cpuInsRatio = app.getMetrics().NewHistogram( + {"soroban", "host-fn-op", "invoke-time-fsecs-cpu-insn-ratio"}); + cpuInsRatio.Clear(); - auto& ledgerCpuInsRatio = app.getMetrics().NewHistogram( - {"soroban", "host-fn-op", "ledger-cpu-insns-ratio"}); - ledgerCpuInsRatio.Clear(); + auto& cpuInsRatioExclVm = app.getMetrics().NewHistogram( + {"soroban", "host-fn-op", + "invoke-time-fsecs-cpu-insn-ratio-excl-vm"}); + cpuInsRatioExclVm.Clear(); - auto& ledgerCpuInsRatioExclVm = app.getMetrics().NewHistogram( - {"soroban", "host-fn-op", - "ledger-cpu-insns-ratio-excl-vm"}); - ledgerCpuInsRatioExclVm.Clear(); + auto& ledgerCpuInsRatio = app.getMetrics().NewHistogram( + {"soroban", "host-fn-op", "ledger-cpu-insns-ratio"}); + ledgerCpuInsRatio.Clear(); - for (size_t i = 0; i < 100; ++i) - { - app.getBucketManager() - .getLiveBucketList() - .resolveAllFutures(); - releaseAssert(app.getBucketManager() - .getLiveBucketList() - .futuresAllResolved()); - al.benchmark(); - } + auto& ledgerCpuInsRatioExclVm = app.getMetrics().NewHistogram( + {"soroban", "host-fn-op", "ledger-cpu-insns-ratio-excl-vm"}); + ledgerCpuInsRatioExclVm.Clear(); - CLOG_INFO(Perf, "Max ledger close: {} milliseconds", - ledgerClose.max()); - CLOG_INFO(Perf, "Min ledger close: {} milliseconds", - 
ledgerClose.min()); - CLOG_INFO(Perf, "Mean ledger close: {} milliseconds", - ledgerClose.mean()); - CLOG_INFO(Perf, "stddev ledger close: {} milliseconds", - ledgerClose.std_dev()); - - CLOG_INFO(Perf, "Max CPU ins ratio: {}", - cpuInsRatio.max() / 1000000); - CLOG_INFO(Perf, "Mean CPU ins ratio: {}", - cpuInsRatio.mean() / 1000000); - - CLOG_INFO(Perf, "Max CPU ins ratio excl VM: {}", - cpuInsRatioExclVm.max() / 1000000); - CLOG_INFO(Perf, "Mean CPU ins ratio excl VM: {}", - cpuInsRatioExclVm.mean() / 1000000); - CLOG_INFO(Perf, "stddev CPU ins ratio excl VM: {}", - cpuInsRatioExclVm.std_dev() / 1000000); - - CLOG_INFO(Perf, "Ledger Max CPU ins ratio: {}", - ledgerCpuInsRatio.max() / 1000000); - CLOG_INFO(Perf, "Ledger Mean CPU ins ratio: {}", - ledgerCpuInsRatio.mean() / 1000000); - CLOG_INFO(Perf, "Ledger stddev CPU ins ratio: {}", - ledgerCpuInsRatio.std_dev() / 1000000); - - CLOG_INFO(Perf, "Ledger Max CPU ins ratio excl VM: {}", - ledgerCpuInsRatioExclVm.max() / 1000000); - CLOG_INFO(Perf, "Ledger Mean CPU ins ratio excl VM: {}", - ledgerCpuInsRatioExclVm.mean() / 1000000); - CLOG_INFO( - Perf, - "Ledger stddev CPU ins ratio excl VM: {} milliseconds", - ledgerCpuInsRatioExclVm.std_dev() / 1000000); - - CLOG_INFO(Perf, "Tx count utilization {}%", - al.getTxCountUtilization().mean() / 1000.0); - CLOG_INFO(Perf, "Instruction utilization {}%", - al.getInstructionUtilization().mean() / 1000.0); - CLOG_INFO(Perf, "Tx size utilization {}%", - al.getTxSizeUtilization().mean() / 1000.0); - CLOG_INFO(Perf, "Read bytes utilization {}%", - al.getReadByteUtilization().mean() / 1000.0); - CLOG_INFO(Perf, "Write bytes utilization {}%", - al.getWriteByteUtilization().mean() / 1000.0); - CLOG_INFO(Perf, "Read entry utilization {}%", - al.getReadEntryUtilization().mean() / 1000.0); - CLOG_INFO(Perf, "Write entry utilization {}%", - al.getWriteEntryUtilization().mean() / 1000.0); - - CLOG_INFO(Perf, "Tx Success Rate: {:f}%", - al.successRate() * 100); + for (size_t i = 0; i < 
100; ++i) + { + app.getBucketManager().getLiveBucketList().resolveAllFutures(); + releaseAssert(app.getBucketManager() + .getLiveBucketList() + .futuresAllResolved()); + al.benchmark(); } - return 0; - }); + CLOG_INFO(Perf, "Max ledger close: {} milliseconds", + ledgerClose.max()); + CLOG_INFO(Perf, "Min ledger close: {} milliseconds", + ledgerClose.min()); + CLOG_INFO(Perf, "Mean ledger close: {} milliseconds", + ledgerClose.mean()); + CLOG_INFO(Perf, "stddev ledger close: {} milliseconds", + ledgerClose.std_dev()); + + CLOG_INFO(Perf, "Max CPU ins ratio: {}", + cpuInsRatio.max() / 1000000); + CLOG_INFO(Perf, "Mean CPU ins ratio: {}", + cpuInsRatio.mean() / 1000000); + + CLOG_INFO(Perf, "Max CPU ins ratio excl VM: {}", + cpuInsRatioExclVm.max() / 1000000); + CLOG_INFO(Perf, "Mean CPU ins ratio excl VM: {}", + cpuInsRatioExclVm.mean() / 1000000); + CLOG_INFO(Perf, "stddev CPU ins ratio excl VM: {}", + cpuInsRatioExclVm.std_dev() / 1000000); + + CLOG_INFO(Perf, "Ledger Max CPU ins ratio: {}", + ledgerCpuInsRatio.max() / 1000000); + CLOG_INFO(Perf, "Ledger Mean CPU ins ratio: {}", + ledgerCpuInsRatio.mean() / 1000000); + CLOG_INFO(Perf, "Ledger stddev CPU ins ratio: {}", + ledgerCpuInsRatio.std_dev() / 1000000); + + CLOG_INFO(Perf, "Ledger Max CPU ins ratio excl VM: {}", + ledgerCpuInsRatioExclVm.max() / 1000000); + CLOG_INFO(Perf, "Ledger Mean CPU ins ratio excl VM: {}", + ledgerCpuInsRatioExclVm.mean() / 1000000); + CLOG_INFO(Perf, + "Ledger stddev CPU ins ratio excl VM: {} milliseconds", + ledgerCpuInsRatioExclVm.std_dev() / 1000000); + + CLOG_INFO(Perf, "Tx count utilization {}%", + al.getTxCountUtilization().mean() / 1000.0); + CLOG_INFO(Perf, "Instruction utilization {}%", + al.getInstructionUtilization().mean() / 1000.0); + CLOG_INFO(Perf, "Tx size utilization {}%", + al.getTxSizeUtilization().mean() / 1000.0); + CLOG_INFO(Perf, "Read bytes utilization {}%", + al.getReadByteUtilization().mean() / 1000.0); + CLOG_INFO(Perf, "Write bytes utilization {}%", + 
al.getWriteByteUtilization().mean() / 1000.0); + CLOG_INFO(Perf, "Read entry utilization {}%", + al.getReadEntryUtilization().mean() / 1000.0); + CLOG_INFO(Perf, "Write entry utilization {}%", + al.getWriteEntryUtilization().mean() / 1000.0); + + CLOG_INFO(Perf, "Tx Success Rate: {:f}%", al.successRate() * 100); + } + + return 0; + }); } #endif diff --git a/src/main/Config.cpp b/src/main/Config.cpp index a932d9217a..4e6ca6ddf4 100644 --- a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -1455,58 +1455,174 @@ Config::processConfig(std::shared_ptr t) {"LOADGEN_OP_COUNT_DISTRIBUTION_FOR_TESTING", [&]() { LOADGEN_OP_COUNT_DISTRIBUTION_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_WASM_BYTES_FOR_TESTING", [&]() { LOADGEN_WASM_BYTES_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_WASM_BYTES_DISTRIBUTION_FOR_TESTING", [&]() { LOADGEN_WASM_BYTES_DISTRIBUTION_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_NUM_DATA_ENTRIES_FOR_TESTING", [&]() { LOADGEN_NUM_DATA_ENTRIES_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_NUM_DATA_ENTRIES_DISTRIBUTION_FOR_TESTING", [&]() { LOADGEN_NUM_DATA_ENTRIES_DISTRIBUTION_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_IO_KILOBYTES_FOR_TESTING", [&]() { LOADGEN_IO_KILOBYTES_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_IO_KILOBYTES_DISTRIBUTION_FOR_TESTING", [&]() { LOADGEN_IO_KILOBYTES_DISTRIBUTION_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_TX_SIZE_BYTES_FOR_TESTING", [&]() { LOADGEN_TX_SIZE_BYTES_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_TX_SIZE_BYTES_DISTRIBUTION_FOR_TESTING", [&]() { LOADGEN_TX_SIZE_BYTES_DISTRIBUTION_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, {"LOADGEN_INSTRUCTIONS_FOR_TESTING", [&]() { LOADGEN_INSTRUCTIONS_FOR_TESTING = - readIntArray(item); + readIntArray(item); }}, 
{"LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING", [&]() { LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING = + readIntArray(item); + }}, + {"APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING", + [&]() { + APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING = + readInt(item); + }}, + {"APPLY_LOAD_BL_SIMULATED_LEDGERS", + [&]() { + APPLY_LOAD_BL_SIMULATED_LEDGERS = readInt(item); + }}, + {"APPLY_LOAD_BL_WRITE_FREQUENCY", + [&]() { + APPLY_LOAD_BL_WRITE_FREQUENCY = readInt(item); + }}, + {"APPLY_LOAD_BL_BATCH_SIZE", + [&]() { APPLY_LOAD_BL_BATCH_SIZE = readInt(item); }}, + {"APPLY_LOAD_BL_LAST_BATCH_LEDGERS", + [&]() { + APPLY_LOAD_BL_LAST_BATCH_LEDGERS = readInt(item); + }}, + {"APPLY_LOAD_BL_LAST_BATCH_SIZE", + [&]() { + APPLY_LOAD_BL_LAST_BATCH_SIZE = readInt(item); + }}, + {"APPLY_LOAD_NUM_RO_ENTRIES_FOR_TESTING", + [&]() { + APPLY_LOAD_NUM_RO_ENTRIES_FOR_TESTING = + readIntArray(item); + }}, + {"APPLY_LOAD_NUM_RO_ENTRIES_DISTRIBUTION_FOR_TESTING", + [&]() { + APPLY_LOAD_NUM_RO_ENTRIES_DISTRIBUTION_FOR_TESTING = + readIntArray(item); + }}, + {"APPLY_LOAD_NUM_RW_ENTRIES_FOR_TESTING", + [&]() { + APPLY_LOAD_NUM_RW_ENTRIES_FOR_TESTING = + readIntArray(item); + }}, + {"APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION_FOR_TESTING", + [&]() { + APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION_FOR_TESTING = + readIntArray(item); + }}, + {"APPLY_LOAD_EVENT_COUNT_FOR_TESTING", + [&]() { + APPLY_LOAD_EVENT_COUNT_FOR_TESTING = + readIntArray(item); + }}, + {"APPLY_LOAD_EVENT_COUNT_DISTRIBUTION_FOR_TESTING", + [&]() { + APPLY_LOAD_EVENT_COUNT_DISTRIBUTION_FOR_TESTING = readIntArray(item); }}, + {"APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS", + [&]() { + APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS = + readInt(item); + }}, + {"APPLY_LOAD_TX_MAX_INSTRUCTIONS", + [&]() { + APPLY_LOAD_TX_MAX_INSTRUCTIONS = readInt(item); + }}, + {"APPLY_LOAD_LEDGER_MAX_READ_LEDGER_ENTRIES", + [&]() { + APPLY_LOAD_LEDGER_MAX_READ_LEDGER_ENTRIES = + readInt(item); + }}, + {"APPLY_LOAD_TX_MAX_READ_LEDGER_ENTRIES", + [&]() { + APPLY_LOAD_TX_MAX_READ_LEDGER_ENTRIES 
= + readInt(item); + }}, + {"APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES", + [&]() { + APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES = + readInt(item); + }}, + {"APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES", + [&]() { + APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES = + readInt(item); + }}, + {"APPLY_LOAD_LEDGER_MAX_READ_BYTES", + [&]() { + APPLY_LOAD_LEDGER_MAX_READ_BYTES = readInt(item); + }}, + {"APPLY_LOAD_TX_MAX_READ_BYTES", + [&]() { + APPLY_LOAD_TX_MAX_READ_BYTES = readInt(item); + }}, + {"APPLY_LOAD_LEDGER_MAX_WRITE_BYTES", + [&]() { + APPLY_LOAD_LEDGER_MAX_WRITE_BYTES = + readInt(item); + }}, + {"APPLY_LOAD_TX_MAX_WRITE_BYTES", + [&]() { + APPLY_LOAD_TX_MAX_WRITE_BYTES = readInt(item); + }}, + {"APPLY_LOAD_MAX_TX_SIZE_BYTES", + [&]() { + APPLY_LOAD_MAX_TX_SIZE_BYTES = readInt(item); + }}, + {"APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES", + [&]() { + APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES = + readInt(item); + }}, + {"APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES", + [&]() { + APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = + readInt(item); + }}, + {"APPLY_LOAD_MAX_TX_COUNT", + [&]() { APPLY_LOAD_MAX_TX_COUNT = readInt(item); }}, + {"CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING", [&]() { CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING = readBool(item); diff --git a/src/main/Config.h b/src/main/Config.h index df88ed4184..f881c0e257 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -315,9 +315,69 @@ class Config : public std::enable_shared_from_this // Instructions per transaction for SOROBAN_INVOKE and MIX_CLASSIC_SOROBAN // loadgen modes - std::vector LOADGEN_INSTRUCTIONS_FOR_TESTING; + // Also used for configuring apply-load command. + std::vector LOADGEN_INSTRUCTIONS_FOR_TESTING; std::vector LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING; + // apply-load-specific configuration parameters: + // Size of the synthetic contract data entries used in apply-load. + // Currently we generate entries of the equal size for more precise + // control over the modelled instructions. 
+ uint32_t APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING = 0; + + // The parameters below control the synthetic bucket list generation in + // apply-load. + + // Number of ledgers to simulate in apply-load. The more ledgers there are, + // the more bucket list levels will be populated. + uint32_t APPLY_LOAD_BL_SIMULATED_LEDGERS = 1000; + // Write a batch of entries every that many ledgers. + uint32_t APPLY_LOAD_BL_WRITE_FREQUENCY = 1000; + // Number of entries to write in every batch. + uint32_t APPLY_LOAD_BL_BATCH_SIZE = 1000; + // The final `APPLY_LOAD_BL_LAST_BATCH_LEDGERS` of synthetic load will each + // have `APPLY_LOAD_BL_LAST_BATCH_SIZE` entries in order to populate the + // lowest BL levels. + uint32_t APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300; + // Number of entries to write in every ledger of + // `APPLY_LOAD_BL_LAST_BATCH_LEDGERS`. + uint32_t APPLY_LOAD_BL_LAST_BATCH_SIZE = 100; + + // The APPLY_LOAD_* parameters below are for initializing Soroban + // settings before applying the benchmark transactions. + uint32_t APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS = 0; + uint32_t APPLY_LOAD_TX_MAX_INSTRUCTIONS = 0; + + uint32_t APPLY_LOAD_LEDGER_MAX_READ_LEDGER_ENTRIES = 0; + uint32_t APPLY_LOAD_TX_MAX_READ_LEDGER_ENTRIES = 0; + + uint32_t APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES = 0; + uint32_t APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES = 0; + + uint32_t APPLY_LOAD_LEDGER_MAX_READ_BYTES = 0; + uint32_t APPLY_LOAD_TX_MAX_READ_BYTES = 0; + + uint32_t APPLY_LOAD_LEDGER_MAX_WRITE_BYTES = 0; + uint32_t APPLY_LOAD_TX_MAX_WRITE_BYTES = 0; + + uint32_t APPLY_LOAD_MAX_TX_SIZE_BYTES = 0; + uint32_t APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES = 0; + + uint32_t APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = 0; + uint32_t APPLY_LOAD_MAX_TX_COUNT = 0; + + // Number of read-only and read-write entries in the apply-load + // transactions. Every entry will have + // `APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING` size. 
+ std::vector APPLY_LOAD_NUM_RO_ENTRIES_FOR_TESTING; + std::vector APPLY_LOAD_NUM_RO_ENTRIES_DISTRIBUTION_FOR_TESTING; + std::vector APPLY_LOAD_NUM_RW_ENTRIES_FOR_TESTING; + std::vector APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION_FOR_TESTING; + + // Number of events to generate in the apply-load transactions. + std::vector APPLY_LOAD_EVENT_COUNT_FOR_TESTING; + std::vector APPLY_LOAD_EVENT_COUNT_DISTRIBUTION_FOR_TESTING; + // Waits for merges to complete before applying transactions during catchup bool CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING; diff --git a/src/rust/Cargo.toml b/src/rust/Cargo.toml index 616767e00a..08af378b52 100644 --- a/src/rust/Cargo.toml +++ b/src/rust/Cargo.toml @@ -96,14 +96,12 @@ tracy-client = { version = "=0.17.0", features = [ [dependencies.soroban-test-wasms] version = "=22.0.0" git = "https://github.com/stellar/rs-soroban-env" -rev = "0497816694bef2b103494c8c61b7c8a06a72c7d3" -#path = "../../../rs-soroban-env/soroban-test-wasms" +rev = "a3f7fca9c2ad89796c7525a648da086543502dd5" [dependencies.soroban-synth-wasm] version = "=22.0.0" git = "https://github.com/stellar/rs-soroban-env" -rev = "0497816694bef2b103494c8c61b7c8a06a72c7d3" -#path = "../../../rs-soroban-env/soroban-synth-wasm" +rev = "a3f7fca9c2ad89796c7525a648da086543502dd5" [features] diff --git a/src/simulation/ApplyLoad.cpp b/src/simulation/ApplyLoad.cpp index 7b190a5365..bbf9eb6e08 100644 --- a/src/simulation/ApplyLoad.cpp +++ b/src/simulation/ApplyLoad.cpp @@ -2,6 +2,7 @@ #include +#include "bucket/test/BucketTestUtils.h" #include "herder/Herder.h" #include "ledger/LedgerManager.h" #include "test/TxTests.h" @@ -15,21 +16,80 @@ #include "medida/metrics_registry.h" #include "util/XDRCereal.h" +#include "xdrpp/printer.h" #include namespace stellar { +namespace +{ +SorobanUpgradeConfig +getUpgradeConfig(Config const& cfg) +{ + SorobanUpgradeConfig upgradeConfig; + upgradeConfig.maxContractSizeBytes = 65536; + upgradeConfig.maxContractDataKeySizeBytes = 250; + 
upgradeConfig.maxContractDataEntrySizeBytes = 65536; + upgradeConfig.ledgerMaxInstructions = + cfg.APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS; + upgradeConfig.txMaxInstructions = cfg.APPLY_LOAD_TX_MAX_INSTRUCTIONS; + upgradeConfig.txMemoryLimit = 41943040; + upgradeConfig.ledgerMaxReadLedgerEntries = + cfg.APPLY_LOAD_LEDGER_MAX_READ_LEDGER_ENTRIES; + upgradeConfig.ledgerMaxReadBytes = cfg.APPLY_LOAD_LEDGER_MAX_READ_BYTES; + upgradeConfig.ledgerMaxWriteLedgerEntries = + cfg.APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES; + upgradeConfig.ledgerMaxWriteBytes = cfg.APPLY_LOAD_LEDGER_MAX_WRITE_BYTES; + upgradeConfig.ledgerMaxTxCount = cfg.APPLY_LOAD_MAX_TX_COUNT; + upgradeConfig.txMaxReadLedgerEntries = + cfg.APPLY_LOAD_TX_MAX_READ_LEDGER_ENTRIES; + upgradeConfig.txMaxReadBytes = cfg.APPLY_LOAD_TX_MAX_READ_BYTES; + upgradeConfig.txMaxWriteLedgerEntries = + cfg.APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES; + upgradeConfig.txMaxWriteBytes = cfg.APPLY_LOAD_TX_MAX_WRITE_BYTES; + upgradeConfig.txMaxContractEventsSizeBytes = + cfg.APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES; + upgradeConfig.ledgerMaxTransactionsSizeBytes = + cfg.APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES; + upgradeConfig.txMaxSizeBytes = cfg.APPLY_LOAD_MAX_TX_SIZE_BYTES; + upgradeConfig.bucketListSizeWindowSampleSize = 30; + upgradeConfig.evictionScanSize = 100000; + upgradeConfig.startingEvictionScanLevel = 7; + // Increase the default TTL and reduce the rent rate in order to avoid the + // state archival and too high rent fees. The apply load test is generally + // not concerned about the resource fees. + upgradeConfig.minPersistentTTL = 1'000'000'000; + upgradeConfig.minTemporaryTTL = 1'000'000'000; + upgradeConfig.maxEntryTTL = 1'000'000'001; + upgradeConfig.persistentRentRateDenominator = 1'000'000'000'000LL; + upgradeConfig.tempRentRateDenominator = 1'000'000'000'000LL; + + // These values are set above using values from Config, so the assertions + // will fail if the config file is missing any of these values. 
+ releaseAssert(upgradeConfig.ledgerMaxInstructions > 0); + releaseAssert(upgradeConfig.txMaxInstructions > 0); + releaseAssert(upgradeConfig.ledgerMaxReadLedgerEntries > 0); + releaseAssert(upgradeConfig.ledgerMaxReadBytes > 0); + releaseAssert(upgradeConfig.ledgerMaxWriteLedgerEntries > 0); + releaseAssert(upgradeConfig.ledgerMaxWriteBytes > 0); + releaseAssert(upgradeConfig.ledgerMaxTxCount > 0); + releaseAssert(upgradeConfig.txMaxReadLedgerEntries > 0); + releaseAssert(upgradeConfig.txMaxReadBytes > 0); + releaseAssert(upgradeConfig.txMaxWriteLedgerEntries > 0); + releaseAssert(upgradeConfig.txMaxWriteBytes > 0); + releaseAssert(upgradeConfig.txMaxContractEventsSizeBytes > 0); + releaseAssert(upgradeConfig.ledgerMaxTransactionsSizeBytes > 0); + releaseAssert(upgradeConfig.txMaxSizeBytes > 0); + return upgradeConfig; +} +} -ApplyLoad::ApplyLoad(Application& app, uint64_t ledgerMaxInstructions, - uint64_t ledgerMaxReadLedgerEntries, - uint64_t ledgerMaxReadBytes, - uint64_t ledgerMaxWriteLedgerEntries, - uint64_t ledgerMaxWriteBytes, uint64_t ledgerMaxTxCount, - uint64_t ledgerMaxTransactionsSizeBytes) +ApplyLoad::ApplyLoad(Application& app) : mTxGenerator(app) , mApp(app) - , mNumAccounts( - ledgerMaxTxCount * SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER + 1) + , mNumAccounts(mApp.getConfig().APPLY_LOAD_MAX_TX_COUNT * + SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER + + 1) , mTxCountUtilization( mApp.getMetrics().NewHistogram({"soroban", "benchmark", "tx-count"})) , mInstructionUtilization( @@ -44,47 +104,26 @@ ApplyLoad::ApplyLoad(Application& app, uint64_t ledgerMaxInstructions, {"soroban", "benchmark", "read-entry"})) , mWriteEntryUtilization(mApp.getMetrics().NewHistogram( {"soroban", "benchmark", "write-entry"})) +{ + setup(); +} + +void +ApplyLoad::setup() { auto rootTestAccount = TestAccount::createRoot(mApp); mRoot = std::make_shared(rootTestAccount); releaseAssert(mTxGenerator.loadAccount(mRoot)); - mUpgradeConfig.maxContractSizeBytes = 65536; - 
mUpgradeConfig.maxContractDataKeySizeBytes = 250; - mUpgradeConfig.maxContractDataEntrySizeBytes = 65536; - mUpgradeConfig.ledgerMaxInstructions = ledgerMaxInstructions; - mUpgradeConfig.txMaxInstructions = 100000000; - mUpgradeConfig.txMemoryLimit = 41943040; - mUpgradeConfig.ledgerMaxReadLedgerEntries = ledgerMaxReadLedgerEntries; - mUpgradeConfig.ledgerMaxReadBytes = ledgerMaxReadBytes; - mUpgradeConfig.ledgerMaxWriteLedgerEntries = ledgerMaxWriteLedgerEntries; - mUpgradeConfig.ledgerMaxWriteBytes = ledgerMaxWriteBytes; - mUpgradeConfig.ledgerMaxTxCount = ledgerMaxTxCount; - mUpgradeConfig.txMaxReadLedgerEntries = 100; - mUpgradeConfig.txMaxReadBytes = 200000; - mUpgradeConfig.txMaxWriteLedgerEntries = 50; - mUpgradeConfig.txMaxWriteBytes = 66560; - mUpgradeConfig.txMaxContractEventsSizeBytes = 8198; - mUpgradeConfig.ledgerMaxTransactionsSizeBytes = - ledgerMaxTransactionsSizeBytes; - mUpgradeConfig.txMaxSizeBytes = 71680; - mUpgradeConfig.bucketListSizeWindowSampleSize = 30; - mUpgradeConfig.evictionScanSize = 100000; - mUpgradeConfig.startingEvictionScanLevel = 7; - - setupAccountsAndUpgradeProtocol(); + setupAccounts(); setupUpgradeContract(); upgradeSettings(); - setupLoadContracts(); - - // One contract per account - releaseAssert(mTxGenerator.getApplySorobanSuccess().count() == - mNumAccounts + 4); - releaseAssert(mTxGenerator.getApplySorobanFailure().count() == 0); + setupLoadContract(); + setupBucketList(); } void @@ -101,7 +140,7 @@ ApplyLoad::closeLedger(std::vector const& txs, } void -ApplyLoad::setupAccountsAndUpgradeProtocol() +ApplyLoad::setupAccounts() { auto const& lm = mApp.getLedgerManager(); // pass in false for initialAccounts so we fund new account with a lower @@ -167,8 +206,9 @@ void ApplyLoad::upgradeSettings() { auto const& lm = mApp.getLedgerManager(); + auto upgradeConfig = getUpgradeConfig(mApp.getConfig()); auto upgradeBytes = - mTxGenerator.getConfigUpgradeSetFromLoadConfig(mUpgradeConfig); + 
mTxGenerator.getConfigUpgradeSetFromLoadConfig(upgradeConfig); SorobanResources resources; resources.instructions = 1'250'000; @@ -180,7 +220,7 @@ ApplyLoad::upgradeSettings() mUpgradeInstanceKey, std::nullopt, resources); auto upgradeSetKey = mTxGenerator.getConfigUpgradeSetKey( - mUpgradeConfig, + upgradeConfig, mUpgradeInstanceKey.contractData().contract.contractId()); auto upgrade = xdr::xvector{}; @@ -193,7 +233,7 @@ ApplyLoad::upgradeSettings() } void -ApplyLoad::setupLoadContracts() +ApplyLoad::setupLoadContract() { auto wasm = rust_bridge::get_test_wasm_loadgen(); xdr::opaque_vec<> wasmBytes; @@ -205,6 +245,9 @@ ApplyLoad::setupLoadContracts() mLoadCodeKey = contractCodeLedgerKey; + int64_t currApplySorobanSuccess = + mTxGenerator.getApplySorobanSuccess().count(); + auto const& lm = mApp.getLedgerManager(); auto uploadTx = mTxGenerator.createUploadWasmTransaction( lm.getLastClosedLedgerNum() + 1, 0, wasmBytes, contractCodeLedgerKey, @@ -212,24 +255,126 @@ ApplyLoad::setupLoadContracts() closeLedger({uploadTx.second}); - for (auto const& kvp : mTxGenerator.getAccounts()) - { - auto salt = sha256("Load contract " + std::to_string(kvp.first)); + auto salt = sha256("Load contract"); + + auto createTx = mTxGenerator.createContractTransaction( + lm.getLastClosedLedgerNum() + 1, 0, contractCodeLedgerKey, + wasmBytes.size() + 160, salt, std::nullopt); + closeLedger({createTx.second}); - auto createTx = mTxGenerator.createContractTransaction( - lm.getLastClosedLedgerNum() + 1, 0, contractCodeLedgerKey, - wasmBytes.size() + 160, salt, std::nullopt); - closeLedger({createTx.second}); + releaseAssert(mTxGenerator.getApplySorobanSuccess().count() - + currApplySorobanSuccess == + 2); + releaseAssert(mTxGenerator.getApplySorobanFailure().count() == 0); - auto instanceKey = - createTx.second->sorobanResources().footprint.readWrite.back(); + auto instanceKey = + createTx.second->sorobanResources().footprint.readWrite.back(); + + 
mLoadInstance.readOnlyKeys.emplace_back(mLoadCodeKey); + mLoadInstance.readOnlyKeys.emplace_back(instanceKey); + mLoadInstance.contractID = instanceKey.contractData().contract; + mLoadInstance.contractEntriesSize = + footprintSize(mApp, mLoadInstance.readOnlyKeys); +} - TxGenerator::ContractInstance instance; - instance.readOnlyKeys.emplace_back(mLoadCodeKey); - instance.readOnlyKeys.emplace_back(instanceKey); - instance.contractID = instanceKey.contractData().contract; - mLoadInstances.emplace(kvp.first, instance); +void +ApplyLoad::setupBucketList() +{ + auto lh = mApp.getLedgerManager().getLastClosedLedgerHeader().header; + auto& bl = mApp.getBucketManager().getLiveBucketList(); + auto const& cfg = mApp.getConfig(); + + uint64_t currentKey = 0; + + LedgerEntry baseLe; + baseLe.data.type(CONTRACT_DATA); + baseLe.data.contractData().contract = mLoadInstance.contractID; + baseLe.data.contractData().key.type(SCV_U64); + baseLe.data.contractData().key.u64() = 0; + baseLe.data.contractData().durability = ContractDataDurability::PERSISTENT; + baseLe.data.contractData().val.type(SCV_BYTES); + mDataEntrySize = xdr::xdr_size(baseLe); + // Add some padding to reach the configured LE size. 
+ if (mDataEntrySize < + mApp.getConfig().APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING) + { + baseLe.data.contractData().val.bytes().resize( + mApp.getConfig().APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING - + mDataEntrySize); + mDataEntrySize = + mApp.getConfig().APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING; + } + else + { + CLOG_WARNING(Perf, + "Apply load generated entry size is larger than " + "APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING: {} > {}", + mApp.getConfig().APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING, + mDataEntrySize); + } + + for (uint32_t i = 0; i < cfg.APPLY_LOAD_BL_SIMULATED_LEDGERS; ++i) + { + if (i % 1000 == 0) + { + CLOG_INFO(Bucket, "Generating BL ledger {}, levels thus far", i); + for (uint32_t j = 0; j < LiveBucketList::kNumLevels; ++j) + { + auto const& lev = bl.getLevel(j); + auto currSz = BucketTestUtils::countEntries(lev.getCurr()); + auto snapSz = BucketTestUtils::countEntries(lev.getSnap()); + CLOG_INFO(Bucket, "Level {}: {} = {} + {}", j, currSz + snapSz, + currSz, snapSz); + } + } + lh.ledgerSeq++; + std::vector initEntries; + bool isLastBatch = i >= cfg.APPLY_LOAD_BL_SIMULATED_LEDGERS - + cfg.APPLY_LOAD_BL_LAST_BATCH_LEDGERS; + if (i % cfg.APPLY_LOAD_BL_WRITE_FREQUENCY == 0 || isLastBatch) + { + uint32_t entryCount = isLastBatch + ? 
cfg.APPLY_LOAD_BL_LAST_BATCH_SIZE + : cfg.APPLY_LOAD_BL_BATCH_SIZE; + for (uint32_t j = 0; j < entryCount; j++) + { + LedgerEntry le = baseLe; + le.lastModifiedLedgerSeq = lh.ledgerSeq; + le.data.contractData().key.u64() = currentKey++; + initEntries.push_back(le); + + LedgerEntry ttlEntry; + ttlEntry.data.type(TTL); + ttlEntry.lastModifiedLedgerSeq = lh.ledgerSeq; + ttlEntry.data.ttl().keyHash = xdrSha256(LedgerEntryKey(le)); + ttlEntry.data.ttl().liveUntilLedgerSeq = 1'000'000'000; + initEntries.push_back(ttlEntry); + } + } + bl.addBatch(mApp, lh.ledgerSeq, lh.ledgerVersion, initEntries, {}, {}); + } + lh.ledgerSeq++; + mDataEntryCount = currentKey; + CLOG_INFO(Bucket, "Final generated bucket list levels"); + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) + { + auto const& lev = bl.getLevel(i); + auto currSz = BucketTestUtils::countEntries(lev.getCurr()); + auto snapSz = BucketTestUtils::countEntries(lev.getSnap()); + CLOG_INFO(Bucket, "Level {}: {} = {} + {}", i, currSz + snapSz, currSz, + snapSz); + } + mApp.getBucketManager().snapshotLedger(lh); + { + LedgerTxn ltx(mApp.getLedgerTxnRoot()); + ltx.loadHeader().current() = lh; + mApp.getLedgerManager().manuallyAdvanceLedgerHeader( + ltx.loadHeader().current()); + ltx.commit(); } + mApp.getLedgerManager().storeCurrentLedgerForTest(lh); + mApp.getHerder().forceSCPStateIntoSyncWithLastClosedLedger(); + closeLedger({}, {}); } void @@ -256,12 +401,9 @@ ApplyLoad::benchmark() auto it = accounts.find(accountIndex); releaseAssert(it != accounts.end()); - auto instanceIter = mLoadInstances.find(it->first); - releaseAssert(instanceIter != mLoadInstances.end()); - auto const& instance = instanceIter->second; - auto tx = mTxGenerator.invokeSorobanLoadTransaction( - lm.getLastClosedLedgerNum() + 1, it->first, instance, - rust_bridge::get_write_bytes().data.size() + 160, std::nullopt); + auto tx = mTxGenerator.invokeSorobanLoadTransactionV2( + lm.getLastClosedLedgerNum() + 1, it->first, mLoadInstance, + 
mDataEntryCount, mDataEntrySize, 1'000'000); { LedgerTxn ltx(mApp.getLedgerTxnRoot()); diff --git a/src/simulation/ApplyLoad.h b/src/simulation/ApplyLoad.h index 9630bca290..08ec773813 100644 --- a/src/simulation/ApplyLoad.h +++ b/src/simulation/ApplyLoad.h @@ -14,11 +14,7 @@ namespace stellar class ApplyLoad { public: - ApplyLoad(Application& app, uint64_t ledgerMaxInstructions, - uint64_t ledgerMaxReadLedgerEntries, uint64_t ledgerMaxReadBytes, - uint64_t ledgerMaxWriteLedgerEntries, - uint64_t ledgerMaxWriteBytes, uint64_t ledgerMaxTxCount, - uint64_t ledgerMaxTransactionsSizeBytes); + ApplyLoad(Application& app); // Fills up a list of transactions with // SOROBAN_TRANSACTION_QUEUE_SIZE_MULTIPLIER * the max ledger resources @@ -48,9 +44,12 @@ class ApplyLoad void closeLedger(std::vector const& txs, xdr::xvector const& upgrades = {}); - void setupAccountsAndUpgradeProtocol(); + void setup(); + + void setupAccounts(); void setupUpgradeContract(); - void setupLoadContracts(); + void setupLoadContract(); + void setupBucketList(); // Upgrades using mUpgradeConfig void upgradeSettings(); @@ -59,9 +58,9 @@ class ApplyLoad LedgerKey mUpgradeInstanceKey; LedgerKey mLoadCodeKey; - UnorderedMap mLoadInstances; - - SorobanUpgradeConfig mUpgradeConfig; + TxGenerator::ContractInstance mLoadInstance; + size_t mDataEntryCount = 0; + size_t mDataEntrySize = 0; TxGenerator mTxGenerator; Application& mApp; diff --git a/src/simulation/TxGenerator.cpp b/src/simulation/TxGenerator.cpp index b5e938baa3..f00578628b 100644 --- a/src/simulation/TxGenerator.cpp +++ b/src/simulation/TxGenerator.cpp @@ -3,6 +3,7 @@ #include "ledger/LedgerManager.h" #include "transactions/TransactionBridge.h" #include "transactions/test/SorobanTxTestUtils.h" +#include #include namespace stellar @@ -38,6 +39,22 @@ sampleDiscrete(std::vector const& values, } } // namespace +uint64_t +footprintSize(Application& app, xdr::xvector const& keys) +{ + LedgerSnapshot lsg(app); + uint64_t total = 0; + for (auto const& 
key : keys) + { + auto entry = lsg.load(key); + if (entry) + { + total += xdr::xdr_size(entry.current()); + } + } + return total; +} + TxGenerator::TxGenerator(Application& app) : mApp(app) , mMinBalance(0) @@ -90,22 +107,6 @@ TxGenerator::generateFee(std::optional maxGeneratedFeeRate, return fee; } -uint64_t -TxGenerator::bytesToRead(xdr::xvector const& keys) -{ - LedgerSnapshot lsg(mApp); - uint64_t total = 0; - for (auto const& key : keys) - { - auto entry = lsg.load(key); - if (entry) - { - total += xdr::xdr_size(entry.current()); - } - } - return total; -} - bool TxGenerator::loadAccount(TestAccount& account) { @@ -450,10 +451,9 @@ TxGenerator::invokeSorobanLoadTransaction( instructionsPerPaddingByte * paddingBytes; // Pick random number of cycles between bounds - uint64_t targetInstructions = - sampleDiscrete(appCfg.LOADGEN_INSTRUCTIONS_FOR_TESTING, - appCfg.LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING, - DEFAULT_INSTRUCTIONS); + uint64_t targetInstructions = sampleDiscrete( + appCfg.LOADGEN_INSTRUCTIONS_FOR_TESTING, + appCfg.LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING, 0u); // Factor in instructions for storage targetInstructions = baseInstructionCount + instructionsForStorageAndAuth >= @@ -484,8 +484,8 @@ TxGenerator::invokeSorobanLoadTransaction( ihf.invokeContract().args = {guestCyclesU64, hostCyclesU64, numEntriesU32, kiloBytesPerEntryU32}; - resources.readBytes = bytesToRead(resources.footprint.readOnly) + - bytesToRead(resources.footprint.readWrite); + resources.readBytes = footprintSize(mApp, resources.footprint.readOnly) + + footprintSize(mApp, resources.footprint.readWrite); resources.writeBytes = totalWriteBytes; increaseOpSize(op, paddingBytes); @@ -513,6 +513,159 @@ TxGenerator::invokeSorobanLoadTransaction( return std::make_pair(account, tx); } +std::pair +TxGenerator::invokeSorobanLoadTransactionV2( + uint32_t ledgerNum, uint64_t accountId, ContractInstance const& instance, + uint64_t dataEntryCount, size_t dataEntrySize, + std::optional 
maxGeneratedFeeRate) +{ + auto const& appCfg = mApp.getConfig(); + + // The estimates below are fairly tight as they depend on linear + // functions (maybe with a small constant factor as well). + uint32_t const baseInstructionCount = 737'119; + uint32_t const baselineTxSizeBytes = 256; + uint32_t const eventSize = 80; + uint32_t const instructionsPerGuestCycle = 40; + uint32_t const instructionsPerHostCycle = 4'875; + uint32_t const instructionsPerAuthByte = 35; + uint32_t const instructionsPerEvent = 8'500; + + SorobanResources resources; + resources.footprint.readOnly = instance.readOnlyKeys; + uint32_t roEntries = sampleDiscrete( + appCfg.APPLY_LOAD_NUM_RO_ENTRIES_FOR_TESTING, + appCfg.APPLY_LOAD_NUM_RO_ENTRIES_DISTRIBUTION_FOR_TESTING, 0u); + uint32_t rwEntries = sampleDiscrete( + appCfg.APPLY_LOAD_NUM_RW_ENTRIES_FOR_TESTING, + appCfg.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION_FOR_TESTING, 0u); + + releaseAssert(dataEntryCount > roEntries + rwEntries); + if (roEntries >= instance.readOnlyKeys.size()) + { + roEntries -= instance.readOnlyKeys.size(); + } + else + { + roEntries = 0; + } + std::unordered_set usedEntries; + stellar::uniform_int_distribution entryDist(0, + dataEntryCount - 1); + auto generateEntries = [&](uint32_t entryCount, + xdr::xvector& footprint) { + for (uint32_t i = 0; i < entryCount; ++i) + { + uint64_t entryId = entryDist(gRandomEngine); + if (usedEntries.emplace(entryId).second) + { + auto lk = contractDataKey(instance.contractID, makeU64(entryId), + ContractDataDurability::PERSISTENT); + footprint.emplace_back(lk); + } + else + { + --i; + } + } + }; + generateEntries(roEntries, resources.footprint.readOnly); + generateEntries(rwEntries, resources.footprint.readWrite); + + uint32_t txOverheadBytes = baselineTxSizeBytes + xdr::xdr_size(resources); + uint32_t desiredTxBytes = sampleDiscrete( + appCfg.LOADGEN_TX_SIZE_BYTES_FOR_TESTING, + appCfg.LOADGEN_TX_SIZE_BYTES_DISTRIBUTION_FOR_TESTING, 0u); + uint32_t paddingBytes = + txOverheadBytes > 
desiredTxBytes ? 0 : desiredTxBytes - txOverheadBytes; + uint32_t entriesSize = dataEntrySize * (roEntries + rwEntries); + + uint32_t eventCount = sampleDiscrete( + appCfg.APPLY_LOAD_EVENT_COUNT_FOR_TESTING, + appCfg.APPLY_LOAD_EVENT_COUNT_DISTRIBUTION_FOR_TESTING, 0u); + + // Pick random number of cycles between bounds + uint32_t targetInstructions = sampleDiscrete( + appCfg.LOADGEN_INSTRUCTIONS_FOR_TESTING, + appCfg.LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING, 0u); + + auto numEntries = (roEntries + rwEntries + instance.readOnlyKeys.size()); + + // The entry encoding estimates are somewhat loose because we're + // unfortunately building storage with O(n^2) complexity. + + // Figuring out the number of instructions for storage is difficult because + // we build storage with O(n^2) complexity, so instead, I graphed the + // instruction count provided in the diagnostics as the invocation starts + // against different entry counts, and got the equation below from that. The + // estimate is pretty close (usually off by 50,000 to 200,000 instructions). + // + // The instructionsPerEntryByte should probably be taken into account here + // but I left the linear calculation for that because the estimate is + // already close. + uint32_t instructionsForEntries = + (205 * std::pow(numEntries, 2)) + (12000 * numEntries) + 65485; + + uint32_t const instructionsPerEntryByte = 44; + + uint32_t instructionsWithoutCpuLoad = + baseInstructionCount + instructionsPerAuthByte * paddingBytes + + instructionsPerEntryByte * entriesSize + instructionsForEntries + + eventCount * instructionsPerEvent; + if (targetInstructions > instructionsWithoutCpuLoad) + { + targetInstructions -= instructionsWithoutCpuLoad; + } + else + { + targetInstructions = 0; + } + + // Instead of mixing both guest and host cycles using the commented out code + // above, we just use guestCycles because there's an issue with how the U256 + // add host function is modeled in the host. 
The instruction count is + // greatly overestimated relative to actual time spent. + uint64_t guestCycles = targetInstructions / instructionsPerGuestCycle; + targetInstructions -= guestCycles * instructionsPerGuestCycle; + uint64_t hostCycles = 0; + + Operation op; + op.body.type(INVOKE_HOST_FUNCTION); + auto& ihf = op.body.invokeHostFunctionOp().hostFunction; + ihf.type(HOST_FUNCTION_TYPE_INVOKE_CONTRACT); + ihf.invokeContract().contractAddress = instance.contractID; + ihf.invokeContract().functionName = "do_cpu_only_work"; + ihf.invokeContract().args = {makeU32(guestCycles), makeU32(hostCycles), + makeU32(eventCount)}; + resources.writeBytes = dataEntrySize * rwEntries; + resources.readBytes = dataEntrySize * roEntries + + instance.contractEntriesSize + resources.writeBytes; + + increaseOpSize(op, paddingBytes); + + resources.instructions = instructionsWithoutCpuLoad + + hostCycles * instructionsPerHostCycle + + guestCycles * instructionsPerGuestCycle; + + auto resourceFee = + sorobanResourceFee(mApp, resources, txOverheadBytes + paddingBytes, + eventSize * eventCount); + resourceFee += 1'000'000; + + // A tx created using this method may be discarded when creating the txSet, + // so we need to refresh the TestAccount sequence number to avoid a + // txBAD_SEQ. 
+ auto account = findAccount(accountId, ledgerNum); + account->loadSequenceNumber(); + + auto tx = sorobanTransactionFrameFromOps(mApp.getNetworkID(), *account, + {op}, {}, resources, + generateFee(maxGeneratedFeeRate, + /* opsCnt */ 1), + resourceFee); + return std::make_pair(account, tx); +} + std::map const& TxGenerator::getAccounts() { @@ -922,4 +1075,4 @@ TxGenerator::pretendTransaction(uint32_t numAccounts, uint32_t offset, acc, createTransactionFramePtr(acc, ops, true, maxGeneratedFeeRate)); } -} \ No newline at end of file +} diff --git a/src/simulation/TxGenerator.h b/src/simulation/TxGenerator.h index ed21ab0dec..267c9341e3 100644 --- a/src/simulation/TxGenerator.h +++ b/src/simulation/TxGenerator.h @@ -10,6 +10,10 @@ class Counter; } namespace stellar { +// Calculates total size we'll need to read for all specified keys +uint64_t footprintSize(Application& app, + xdr::xvector const& keys); + // Config settings for SOROBAN_CREATE_UPGRADE struct SorobanUpgradeConfig { @@ -62,6 +66,7 @@ class TxGenerator // [wasm, instance] xdr::xvector readOnlyKeys; SCAddress contractID; + uint32_t contractEntriesSize = 0; }; using TestAccountPtr = std::shared_ptr; @@ -111,6 +116,12 @@ class TxGenerator uint64_t contractOverheadBytes, std::optional maxGeneratedFeeRate); std::pair + invokeSorobanLoadTransactionV2(uint32_t ledgerNum, uint64_t accountId, + ContractInstance const& instance, + uint64_t dataEntryCount, + size_t dataEntrySize, + std::optional maxGeneratedFeeRate); + std::pair invokeSorobanCreateUpgradeTransaction( uint32_t ledgerNum, uint64_t accountId, SCBytes const& upgradeBytes, LedgerKey const& codeKey, LedgerKey const& instanceKey, @@ -149,9 +160,6 @@ class TxGenerator private: std::pair sorobanRandomUploadResources(); - // Calculates total size we'll need to read for all specified keys - uint64_t bytesToRead(xdr::xvector const& keys); - void updateMinBalance(); Application& mApp; @@ -166,4 +174,4 @@ class TxGenerator medida::Counter const& 
mApplySorobanFailure; }; -} \ No newline at end of file +} diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp index 11d16cd437..5cc05ee820 100644 --- a/src/simulation/test/LoadGeneratorTests.cpp +++ b/src/simulation/test/LoadGeneratorTests.cpp @@ -846,33 +846,58 @@ TEST_CASE("apply load", "[loadgen][applyload]") cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 1000; cfg.USE_CONFIG_FOR_GENESIS = true; cfg.LEDGER_PROTOCOL_VERSION = Config::CURRENT_LEDGER_PROTOCOL_VERSION; + cfg.MANUAL_CLOSE = true; + + cfg.APPLY_LOAD_DATA_ENTRY_SIZE_FOR_TESTING = 1000; + + cfg.APPLY_LOAD_BL_SIMULATED_LEDGERS = 10000; + cfg.APPLY_LOAD_BL_WRITE_FREQUENCY = 1000; + cfg.APPLY_LOAD_BL_BATCH_SIZE = 1000; + cfg.APPLY_LOAD_BL_LAST_BATCH_LEDGERS = 300; + cfg.APPLY_LOAD_BL_LAST_BATCH_SIZE = 100; + + cfg.APPLY_LOAD_NUM_RO_ENTRIES_FOR_TESTING = {5, 10, 30}; + cfg.APPLY_LOAD_NUM_RO_ENTRIES_DISTRIBUTION_FOR_TESTING = {1, 1, 1}; + + cfg.APPLY_LOAD_NUM_RW_ENTRIES_FOR_TESTING = {1, 5, 10}; + cfg.APPLY_LOAD_NUM_RW_ENTRIES_DISTRIBUTION_FOR_TESTING = {1, 1, 1}; + + cfg.APPLY_LOAD_EVENT_COUNT_FOR_TESTING = {100}; + cfg.APPLY_LOAD_EVENT_COUNT_DISTRIBUTION_FOR_TESTING = {1}; - cfg.LOADGEN_NUM_DATA_ENTRIES_FOR_TESTING = {5, 10, 30}; - cfg.LOADGEN_NUM_DATA_ENTRIES_DISTRIBUTION_FOR_TESTING = {1, 1, 1}; - cfg.LOADGEN_IO_KILOBYTES_FOR_TESTING = {1, 5, 10}; - cfg.LOADGEN_IO_KILOBYTES_DISTRIBUTION_FOR_TESTING = {10, 2, 1}; cfg.LOADGEN_TX_SIZE_BYTES_FOR_TESTING = {1'000, 2'000, 5'000}; cfg.LOADGEN_TX_SIZE_BYTES_DISTRIBUTION_FOR_TESTING = {3, 2, 1}; + cfg.LOADGEN_INSTRUCTIONS_FOR_TESTING = {10'000'000, 50'000'000}; cfg.LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING = {5, 1}; + cfg.APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS = 500'000'000; + cfg.APPLY_LOAD_TX_MAX_INSTRUCTIONS = 100'000'000; + + cfg.APPLY_LOAD_LEDGER_MAX_READ_LEDGER_ENTRIES = 2000; + cfg.APPLY_LOAD_TX_MAX_READ_LEDGER_ENTRIES = 100; + + cfg.APPLY_LOAD_LEDGER_MAX_READ_BYTES = 50'000'000; + cfg.APPLY_LOAD_TX_MAX_READ_BYTES = 
200'000; + + cfg.APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES = 1250; + cfg.APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES = 50; + + cfg.APPLY_LOAD_LEDGER_MAX_WRITE_BYTES = 700'000; + cfg.APPLY_LOAD_TX_MAX_WRITE_BYTES = 66560; + + cfg.APPLY_LOAD_MAX_TX_SIZE_BYTES = 71680; + cfg.APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES = 800'000; + + cfg.APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = 8198; + cfg.APPLY_LOAD_MAX_TX_COUNT = 50; + REQUIRE(cfg.isUsingBucketListDB()); VirtualClock clock(VirtualClock::REAL_TIME); auto app = createTestApplication(clock, cfg); - uint64_t ledgerMaxInstructions = 500'000'000; - uint64_t ledgerMaxReadLedgerEntries = 2000; - uint64_t ledgerMaxReadBytes = 50'000'000; - uint64_t ledgerMaxWriteLedgerEntries = 1250; - uint64_t ledgerMaxWriteBytes = 700'000; - uint64_t ledgerMaxTxCount = 50; - uint64_t ledgerMaxTransactionsSizeBytes = 800'000; - - ApplyLoad al(*app, ledgerMaxInstructions, ledgerMaxReadLedgerEntries, - ledgerMaxReadBytes, ledgerMaxWriteLedgerEntries, - ledgerMaxWriteBytes, ledgerMaxTxCount, - ledgerMaxTransactionsSizeBytes); + ApplyLoad al(*app); auto& ledgerClose = app->getMetrics().NewTimer({"ledger", "ledger", "close"}); @@ -885,6 +910,11 @@ TEST_CASE("apply load", "[loadgen][applyload]") auto& cpuInsRatioExclVm = app->getMetrics().NewHistogram( {"soroban", "host-fn-op", "invoke-time-fsecs-cpu-insn-ratio-excl-vm"}); cpuInsRatioExclVm.Clear(); + + auto& declaredInsnsUsageRatio = app->getMetrics().NewHistogram( + {"soroban", "host-fn-op", "declared-cpu-insns-usage-ratio"}); + declaredInsnsUsageRatio.Clear(); + for (size_t i = 0; i < 100; ++i) { app->getBucketManager().getLiveBucketList().resolveAllFutures(); @@ -893,7 +923,7 @@ TEST_CASE("apply load", "[loadgen][applyload]") al.benchmark(); } - REQUIRE(al.successRate() - 1.0 < std::numeric_limits::epsilon()); + REQUIRE(1.0 - al.successRate() < std::numeric_limits::epsilon()); CLOG_INFO(Perf, "Max ledger close: {} milliseconds", ledgerClose.max()); CLOG_INFO(Perf, "Min ledger close: {} milliseconds", 
ledgerClose.min()); CLOG_INFO(Perf, "Mean ledger close: {} milliseconds", ledgerClose.mean()); @@ -907,9 +937,16 @@ TEST_CASE("apply load", "[loadgen][applyload]") cpuInsRatioExclVm.max() / 1000000); CLOG_INFO(Perf, "Mean CPU ins ratio excl VM: {}", cpuInsRatioExclVm.mean() / 1000000); - CLOG_INFO(Perf, "STDDEV CPU ins ratio excl VM: {}", + CLOG_INFO(Perf, "stddev CPU ins ratio excl VM: {}", cpuInsRatioExclVm.std_dev() / 1000000); + CLOG_INFO(Perf, "Min CPU declared insns ratio: {}", + declaredInsnsUsageRatio.min() / 1000000.0); + CLOG_INFO(Perf, "Mean CPU declared insns ratio: {}", + declaredInsnsUsageRatio.mean() / 1000000.0); + CLOG_INFO(Perf, "stddev CPU declared insns ratio: {}", + declaredInsnsUsageRatio.std_dev() / 1000000.0); + CLOG_INFO(Perf, "Tx count utilization {}%", al.getTxCountUtilization().mean() / 1000.0); CLOG_INFO(Perf, "Instruction utilization {}%", diff --git a/src/transactions/InvokeHostFunctionOpFrame.cpp b/src/transactions/InvokeHostFunctionOpFrame.cpp index 28a6ec1f83..b8ca78472f 100644 --- a/src/transactions/InvokeHostFunctionOpFrame.cpp +++ b/src/transactions/InvokeHostFunctionOpFrame.cpp @@ -132,6 +132,7 @@ struct HostFunctionMetrics uint64_t mInvokeTimeNsecs{0}; uint64_t mCpuInsnExclVm{0}; uint64_t mInvokeTimeNsecsExclVm{0}; + uint64_t mDeclaredCpuInsn{0}; // max single entity size metrics uint32_t mMaxReadWriteKeyByte{0}; @@ -213,6 +214,8 @@ struct HostFunctionMetrics mMetrics.mHostFnOpInvokeTimeFsecsCpuInsnRatioExclVm.Update( mInvokeTimeNsecsExclVm * 1000000 / std::max(mCpuInsnExclVm, uint64_t(1))); + mMetrics.mHostFnOpDeclaredInsnsUsageRatio.Update( + mCpuInsn * 1000000 / std::max(mDeclaredCpuInsn, uint64_t(1))); mMetrics.mHostFnOpMaxRwKeyByte.Mark(mMaxReadWriteKeyByte); mMetrics.mHostFnOpMaxRwDataByte.Mark(mMaxReadWriteDataByte); @@ -337,6 +340,8 @@ InvokeHostFunctionOpFrame::doApply( rust::Vec ttlEntryCxxBufs; auto const& resources = mParentTx.sorobanResources(); + metrics.mDeclaredCpuInsn = resources.instructions; + auto const& 
footprint = resources.footprint; auto footprintLength = footprint.readOnly.size() + footprint.readWrite.size(); From 4ade907cefcc3a1978508cd634d92c7a0518c33c Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Wed, 27 Nov 2024 10:25:06 -0800 Subject: [PATCH 03/17] Removed SQL from test requiring direct commits via ltx --- docs/integration.md | 2 +- docs/quick-reference.md | 40 -- docs/software/commands.md | 6 - docs/stellar-core_example.cfg | 13 - src/bucket/BucketManager.cpp | 15 +- src/bucket/BucketManager.h | 1 - src/bucket/test/BucketListTests.cpp | 2 +- src/bucket/test/BucketManagerTests.cpp | 2 +- src/bucket/test/BucketTests.cpp | 2 +- src/catchup/ApplyBucketsWork.cpp | 14 +- src/catchup/ApplyBucketsWork.h | 2 + src/database/test/DatabaseTests.cpp | 8 +- src/herder/test/HerderTests.cpp | 73 +-- src/herder/test/UpgradesTests.cpp | 12 +- src/history/test/HistoryTests.cpp | 14 +- src/history/test/HistoryTestsUtils.h | 2 +- .../BucketListIsConsistentWithDatabase.cpp | 235 ++------ .../BucketListIsConsistentWithDatabase.h | 2 +- src/invariant/Invariant.h | 4 +- src/invariant/InvariantManager.h | 9 +- src/invariant/InvariantManagerImpl.cpp | 14 +- src/invariant/InvariantManagerImpl.h | 3 +- .../AccountSubEntriesCountIsValidTests.cpp | 4 +- ...ucketListIsConsistentWithDatabaseTests.cpp | 518 ++++------------ .../test/ConservationOfLumensTests.cpp | 6 +- src/invariant/test/InvariantTests.cpp | 16 +- .../test/LedgerEntryIsValidTests.cpp | 4 +- .../test/LiabilitiesMatchOffersTests.cpp | 14 +- .../test/OrderBookIsNotCrossedTests.cpp | 4 +- .../test/SponsorshipCountIsValidTests.cpp | 2 +- src/ledger/InMemoryLedgerTxn.cpp | 116 +++- src/ledger/InMemoryLedgerTxn.h | 26 +- src/ledger/LedgerManagerImpl.cpp | 65 +- src/ledger/LedgerStateSnapshot.cpp | 6 +- src/ledger/LedgerTxn.cpp | 7 + .../test/LedgerCloseMetaStreamTests.cpp | 237 +------- src/ledger/test/LedgerTxnTests.cpp | 555 +++--------------- src/main/Application.h | 5 - src/main/ApplicationImpl.cpp | 268 ++++----- 
src/main/ApplicationImpl.h | 4 +- src/main/ApplicationUtils.cpp | 140 +---- src/main/ApplicationUtils.h | 4 +- src/main/CommandLine.cpp | 87 +-- src/main/Config.cpp | 30 +- src/main/Config.h | 37 +- src/main/test/ApplicationUtilsTests.cpp | 263 +-------- src/main/test/ExternalQueueTests.cpp | 44 -- src/overlay/test/OverlayTests.cpp | 4 +- src/simulation/Simulation.cpp | 5 +- src/simulation/Simulation.h | 5 +- src/test/TestUtils.cpp | 21 +- src/test/TestUtils.h | 3 +- src/test/test.cpp | 32 +- src/transactions/test/AllowTrustTests.cpp | 4 +- src/transactions/test/BumpSequenceTests.cpp | 2 +- src/transactions/test/ChangeTrustTests.cpp | 4 +- .../test/ClaimableBalanceTests.cpp | 2 +- .../test/ClawbackClaimableBalanceTests.cpp | 2 +- src/transactions/test/ClawbackTests.cpp | 2 +- src/transactions/test/CreateAccountTests.cpp | 2 +- .../test/EndSponsoringFutureReservesTests.cpp | 2 +- .../test/FeeBumpTransactionTests.cpp | 2 +- src/transactions/test/InflationTests.cpp | 2 +- .../test/InvokeHostFunctionTests.cpp | 2 +- .../test/LiquidityPoolDepositTests.cpp | 2 +- .../test/LiquidityPoolTradeTests.cpp | 2 +- .../test/LiquidityPoolWithdrawTests.cpp | 2 +- src/transactions/test/ManageBuyOfferTests.cpp | 16 +- src/transactions/test/ManageDataTests.cpp | 2 +- src/transactions/test/MergeTests.cpp | 2 +- src/transactions/test/OfferTests.cpp | 2 +- .../test/PathPaymentStrictSendTests.cpp | 4 +- src/transactions/test/PathPaymentTests.cpp | 2 +- src/transactions/test/PaymentTests.cpp | 12 +- .../test/RevokeSponsorshipTests.cpp | 2 +- src/transactions/test/SetOptionsTests.cpp | 2 +- .../test/SetTrustLineFlagsTests.cpp | 4 +- src/transactions/test/TxEnvelopeTests.cpp | 2 +- 78 files changed, 797 insertions(+), 2292 deletions(-) delete mode 100644 src/main/test/ExternalQueueTests.cpp diff --git a/docs/integration.md b/docs/integration.md index 8140017fb3..deb6147fc3 100644 --- a/docs/integration.md +++ b/docs/integration.md @@ -19,7 +19,7 @@ stellar-core generates several types of 
data that can be used by applications, d Full [Ledger](ledger.md) snapshots are available in both: * [history archives](history.md) (checkpoints, every 64 ledgers, updated every 5 minutes) - * in the case of captive-core (enabled via the `--in-memory` command line option) the ledger is maintained within the stellar-core process and ledger-state need to be tracked as it changes via "meta" updates. +* in the case of captive-core, the ledger is maintained within the stellar-core process and ledger-state need to be tracked as it changes via "meta" updates. ## Ledger State transition information (transactions, etc) diff --git a/docs/quick-reference.md b/docs/quick-reference.md index 24c76a6db5..b52cf8ad4f 100644 --- a/docs/quick-reference.md +++ b/docs/quick-reference.md @@ -146,46 +146,6 @@ some time, as the entire sequence of ledger _headers_ in the archive (though non transactions or ledger states) must be downloaded and verified sequentially. It may therefore be worthwhile to save and reuse such a trusted reference file multiple times before regenerating it. -##### Experimental fast "meta data generation" -`catchup` has a command line flag `--in-memory` that when combined with the -`METADATA_OUTPUT_STREAM` allows a stellar-core instance to stream meta data instead -of using a database as intermediate store. - -This has been tested as being orders of magnitude faster for replaying large sections -of history. - -If you don't specify any value for stream the command will just replay transactions -in memory and throw away all meta. This can be useful for performance testing the transaction processing subsystem. - -The `--in-memory` flag is also supported by the `run` command, which can be used to -run a lightweight, stateless validator or watcher node, and this can be combined with -`METADATA_OUTPUT_STREAM` to stream network activity to another process. 
- -By default, such a stateless node in `run` mode will catch up to the network starting from the -network's most recent checkpoint, but this behaviour can be further modified using two flags -(that must be used together) called `--start-at-ledger ` and `--start-at-hash `. These -cause the node to start with a fast in-memory catchup to ledger `N` with hash `HEXHASH`, and then -replay ledgers forward to the current state of the network. - -A stateless and meta-streaming node can additionally be configured with -`EXPERIMENTAL_PRECAUTION_DELAY_META=true` (if unspecified, the default is -`false`). If `EXPERIMENTAL_PRECAUTION_DELAY_META` is `true`, then the node will -delay emitting meta for a ledger `` until the _next_ ledger, ``, closes. -The idea is that if a node suffers local corruption in a ledger because of a -software bug or hardware fault, it will be unable to close the _next_ ledger -because it won't be able to reach consensus with other nodes on the input state -of the next ledger. Therefore, the meta for the corrupted ledger will never be -emitted. With `EXPERIMENTAL_PRECAUTION_DELAY_META` set to `false`, a local -corruption bug could cause a node to emit meta that is inconsistent with that of -other nodes on the network. Setting `EXPERIMENTAL_PRECAUTION_DELAY_META` to -`true` does have a cost, though: clients waiting for the meta to determine the -result of a transaction will have to wait for an extra ledger close duration. - -During catchup from history archives, a stateless node will emit meta for any -historical ledger without delay, even if `EXPERIMENTAL_PRECAUTION_DELAY_META` is -`true`, because the ledger's results are already part of the validated consensus -history. - #### Publish backlog There is a command `publish` that allows to flush the publish backlog without starting core. 
This can be useful to run to guarantee that certain tasks are done before moving diff --git a/docs/software/commands.md b/docs/software/commands.md index 8da3850c46..db17ba6a83 100644 --- a/docs/software/commands.md +++ b/docs/software/commands.md @@ -160,12 +160,6 @@ apply. * **run**: Runs stellar-core service.
Option **--wait-for-consensus** lets validators wait to hear from the network before participating in consensus.
- (deprecated) Option **--in-memory** stores the current ledger in memory rather than a - database.
- (deprecated) Option **--start-at-ledger ** starts **--in-memory** mode with a catchup to - ledger **N** then replays to the current state of the network.
- (deprecated) Option **--start-at-hash ** provides a (mandatory) hash for the ledger - **N** specified by the **--start-at-ledger** option. * **sec-to-pub**: Reads a secret key on standard input and outputs the corresponding public key. Both keys are in Stellar's standard base-32 ASCII format. diff --git a/docs/stellar-core_example.cfg b/docs/stellar-core_example.cfg index 103c115cf4..c8325b7476 100644 --- a/docs/stellar-core_example.cfg +++ b/docs/stellar-core_example.cfg @@ -601,19 +601,6 @@ MAX_SLOTS_TO_REMEMBER=12 # only a passive "watcher" node. METADATA_OUTPUT_STREAM="" -# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true causes a stateless node -# which is streaming meta to delay streaming the meta for a given ledger until -# it closes the next ledger. This ensures that if a local bug had corrupted the -# given ledger, then the meta for the corrupted ledger will never be emitted, as -# the node will not be able to reach consensus with the network on the next -# ledger. -# -# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true in combination with a -# non-empty METADATA_OUTPUT_STREAM (which can be configured on the command line -# as well as in the config file) requires an in-memory database (specified by -# using --in-memory on the command line). -EXPERIMENTAL_PRECAUTION_DELAY_META=false - # Number of ledgers worth of transaction metadata to preserve on disk for # debugging purposes. 
These records are automatically maintained and rotated # during processing, and are helpful for recovery in case of a serious error; diff --git a/src/bucket/BucketManager.cpp b/src/bucket/BucketManager.cpp index e3e5f5f524..57ca4cdfcf 100644 --- a/src/bucket/BucketManager.cpp +++ b/src/bucket/BucketManager.cpp @@ -164,10 +164,6 @@ BucketManager::BucketManager(Application& app) app.getMetrics().NewCounter({"bucketlist-archive", "size", "bytes"})) , mBucketListEvictionCounters(app) , mEvictionStatistics(std::make_shared()) - // Minimal DB is stored in the buckets dir, so delete it only when - // mode does not use minimal DB - , mDeleteEntireBucketDirInDtor( - app.getConfig().isInMemoryModeWithoutMinimalDB()) , mConfig(app.getConfig()) { for (uint32_t t = @@ -259,15 +255,8 @@ BucketManager::getBucketDir() const BucketManager::~BucketManager() { - ZoneScoped; - if (mDeleteEntireBucketDirInDtor) - { - deleteEntireBucketDir(); - } - else - { - deleteTmpDirAndUnlockBucketDir(); - } + + deleteTmpDirAndUnlockBucketDir(); } void diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index 3b5f85a65b..09f4e1818f 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -106,7 +106,6 @@ class BucketManager : NonMovableOrCopyable std::future mEvictionFuture{}; - bool const mDeleteEntireBucketDirInDtor; // Copy app's config for thread-safe access Config const mConfig; diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index 5bb6a71d52..7cc5a6a64b 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -861,7 +861,7 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]") TEST_CASE_VERSIONS("network config snapshots BucketList size", "[bucketlist]") { VirtualClock clock; - Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY)); cfg.USE_CONFIG_FOR_GENESIS = true; auto app = 
createTestApplication(clock, cfg); diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index fc5390653f..62fb33af2a 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -501,7 +501,7 @@ TEST_CASE("bucketmanager do not leak empty-merge futures", // are thereby not leaking. Disable BucketListDB so that snapshots do not // hold persist buckets, complicating bucket counting. VirtualClock clock; - Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY)); cfg.ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING = true; cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = static_cast( diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp index bedb9adb69..30d82ff71c 100644 --- a/src/bucket/test/BucketTests.cpp +++ b/src/bucket/test/BucketTests.cpp @@ -1015,7 +1015,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", TEST_CASE_VERSIONS("legacy bucket apply", "[bucket]") { VirtualClock clock; - Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY)); for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer app = createTestApplication(clock, cfg); diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp index e37a82680a..ac564c2b04 100644 --- a/src/catchup/ApplyBucketsWork.cpp +++ b/src/catchup/ApplyBucketsWork.cpp @@ -72,6 +72,8 @@ ApplyBucketsWork::ApplyBucketsWork( , mLevel(startingLevel()) , mMaxProtocolVersion(maxProtocolVersion) , mCounters(app.getClock().now()) + , mIsApplyInvariantEnabled( + app.getInvariantManager().isBucketApplyInvariantEnabled()) { } @@ -111,6 +113,7 @@ ApplyBucketsWork::doReset() mLastPos = 0; mBucketToApplyIndex = 0; mMinProtocolVersionSeen = UINT32_MAX; + mSeenKeysBeforeApply.clear(); mSeenKeys.clear(); mBucketsToApply.clear(); 
mBucketApplicator.reset(); @@ -201,6 +204,14 @@ ApplyBucketsWork::startBucket() auto bucket = mBucketsToApply.at(mBucketToApplyIndex); mMinProtocolVersionSeen = std::min(mMinProtocolVersionSeen, bucket->getBucketVersion()); + + // Take a snapshot of seen keys before applying the bucket, only if + // invariants are enabled since this is expensive. + if (mIsApplyInvariantEnabled) + { + mSeenKeysBeforeApply = mSeenKeys; + } + // Create a new applicator for the bucket. mBucketApplicator = std::make_unique( mApp, mMaxProtocolVersion, mMinProtocolVersionSeen, mLevel, bucket, @@ -297,7 +308,8 @@ ApplyBucketsWork::doWork() // bucket. mApp.getInvariantManager().checkOnBucketApply( mBucketsToApply.at(mBucketToApplyIndex), - mApplyState.currentLedger, mLevel, isCurr, mEntryTypeFilter); + mApplyState.currentLedger, mLevel, isCurr, + mSeenKeysBeforeApply); prepareForNextBucket(); } if (!appliedAllBuckets()) diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h index 77674e816e..fc239d1592 100644 --- a/src/catchup/ApplyBucketsWork.h +++ b/src/catchup/ApplyBucketsWork.h @@ -38,12 +38,14 @@ class ApplyBucketsWork : public Work uint32_t mLevel{0}; uint32_t mMaxProtocolVersion{0}; uint32_t mMinProtocolVersionSeen{UINT32_MAX}; + std::unordered_set mSeenKeysBeforeApply; std::unordered_set mSeenKeys; std::vector> mBucketsToApply; std::unique_ptr mBucketApplicator; bool mDelayChecked{false}; BucketApplicator::Counters mCounters; + bool const mIsApplyInvariantEnabled; void advance(std::string const& name, BucketApplicator& applicator); std::shared_ptr getBucket(std::string const& bucketHash); diff --git a/src/database/test/DatabaseTests.cpp b/src/database/test/DatabaseTests.cpp index 4a17cd565c..c2fc838bd3 100644 --- a/src/database/test/DatabaseTests.cpp +++ b/src/database/test/DatabaseTests.cpp @@ -72,7 +72,7 @@ transactionTest(Application::pointer app) TEST_CASE("database smoketest", "[db]") { - Config const& cfg = getTestConfig(0, 
Config::TESTDB_IN_MEMORY_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; Application::pointer app = createTestApplication(clock, cfg, true, false); @@ -81,7 +81,7 @@ TEST_CASE("database smoketest", "[db]") TEST_CASE("database on-disk smoketest", "[db]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE); + Config const& cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT); VirtualClock clock; Application::pointer app = createTestApplication(clock, cfg, true, false); @@ -201,7 +201,7 @@ checkMVCCIsolation(Application::pointer app) TEST_CASE("sqlite MVCC test", "[db]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE); + Config const& cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT); VirtualClock clock; Application::pointer app = createTestApplication(clock, cfg, true, false); checkMVCCIsolation(app); @@ -349,7 +349,7 @@ TEST_CASE("postgres performance", "[db][pgperf][!hide]") TEST_CASE("schema test", "[db]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; Application::pointer app = createTestApplication(clock, cfg); diff --git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp index fcc07519f4..af32283d65 100644 --- a/src/herder/test/HerderTests.cpp +++ b/src/herder/test/HerderTests.cpp @@ -1135,7 +1135,7 @@ TEST_CASE("surge pricing", "[herder][txset][soroban]") { SECTION("max 0 ops per ledger") { - Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY)); cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 0; VirtualClock clock; @@ -2564,11 +2564,6 @@ TEST_CASE("SCP State", "[herder]") }; auto doTest = [&](bool forceSCP) { - SECTION("sqlite") - { - configure(Config::TestDbMode::TESTDB_ON_DISK_SQLITE); - } - SECTION("bucketlistDB") { 
configure(Config::TestDbMode::TESTDB_BUCKET_DB_PERSISTENT); @@ -3258,7 +3253,7 @@ TEST_CASE("accept soroban txs after network upgrade", "[soroban][herder]") auto simulation = Topologies::core(4, 1, Simulation::OVER_LOOPBACK, networkID, [](int i) { - auto cfg = getTestConfig(i, Config::TESTDB_ON_DISK_SQLITE); + auto cfg = getTestConfig(i, Config::TESTDB_IN_MEMORY); cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 100; cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = static_cast(SOROBAN_PROTOCOL_VERSION) - 1; @@ -3637,42 +3632,6 @@ checkHerder(Application& app, HerderImpl& herder, Herder::State expectedState, REQUIRE(herder.trackingConsensusLedgerIndex() == ledger); } -// Either setup a v19 -> v20 upgrade, or a fee upgrade in v20 -static void -setupUpgradeAtNextLedger(Application& app) -{ - Upgrades::UpgradeParameters scheduledUpgrades; - scheduledUpgrades.mUpgradeTime = - VirtualClock::from_time_t(app.getLedgerManager() - .getLastClosedLedgerHeader() - .header.scpValue.closeTime + - 5); - if (protocolVersionIsBefore(app.getLedgerManager() - .getLastClosedLedgerHeader() - .header.ledgerVersion, - SOROBAN_PROTOCOL_VERSION)) - { - scheduledUpgrades.mProtocolVersion = - static_cast(SOROBAN_PROTOCOL_VERSION); - } - else - { - LedgerTxn ltx(app.getLedgerTxnRoot()); - ConfigUpgradeSetFrameConstPtr configUpgradeSet; - ConfigUpgradeSet configUpgradeSetXdr; - auto& configEntry = configUpgradeSetXdr.updatedEntry.emplace_back(); - configEntry.configSettingID(CONFIG_SETTING_CONTRACT_BANDWIDTH_V0); - configEntry.contractBandwidth().ledgerMaxTxsSizeBytes = 1'000'000; - configEntry.contractBandwidth().txMaxSizeBytes = 500'000; - - configUpgradeSet = makeConfigUpgradeSet(ltx, configUpgradeSetXdr); - - scheduledUpgrades.mConfigUpgradeSetKey = configUpgradeSet->getKey(); - ltx.commit(); - } - app.getHerder().setUpgrades(scheduledUpgrades); -} - // The main purpose of this test is to ensure the externalize path works // correctly. 
This entails properly updating tracking in Herder, forwarding // externalize information to LM, and Herder appropriately reacting to ledger @@ -3687,7 +3646,7 @@ herderExternalizesValuesWithProtocol(uint32_t version) auto networkID = sha256(getTestConfig().NETWORK_PASSPHRASE); auto simulation = std::make_shared( Simulation::OVER_LOOPBACK, networkID, [version](int i) { - auto cfg = getTestConfig(i, Config::TESTDB_ON_DISK_SQLITE); + auto cfg = getTestConfig(i, Config::TESTDB_BUCKET_DB_PERSISTENT); cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = version; return cfg; }); @@ -3720,14 +3679,6 @@ herderExternalizesValuesWithProtocol(uint32_t version) Herder::State::HERDER_BOOTING_STATE); simulation->startAllNodes(); - if (protocolVersionStartsFrom(version, SOROBAN_PROTOCOL_VERSION)) - { - upgradeSorobanNetworkConfig( - [&](SorobanNetworkConfig& cfg) { - cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1; - }, - simulation); - } // After SCP is restored, Herder is tracking REQUIRE(getC()->getHerder().getState() == @@ -3801,22 +3752,28 @@ herderExternalizesValuesWithProtocol(uint32_t version) REQUIRE(currentALedger() >= currentLedger); REQUIRE(currentCLedger() == currentLedger); + // Arm the upgrade, but don't close the upgrade ledger yet + // C won't upgrade until it's on the right LCL + upgradeSorobanNetworkConfig( + [&](SorobanNetworkConfig& cfg) { + cfg.mLedgerMaxTransactionsSizeBytes = 1'000'000; + cfg.mTxMaxSizeBytes = 500'000; + cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1; + }, + simulation, /*applyUpgrade=*/false); + // disconnect C simulation->dropConnection(validatorAKey.getPublicKey(), validatorCKey.getPublicKey()); + currentLedger = currentALedger(); + // Advance A and B a bit further, and collect externalize messages std::map> validatorSCPMessagesA; std::map> validatorSCPMessagesB; - for (auto& node : {A, B, getC()}) - { - // C won't upgrade until it's on the right LCL - setupUpgradeAtNextLedger(*node); - } - auto destinationLedger = 
waitForAB(4, true); for (auto start = currentLedger + 1; start <= destinationLedger; start++) { diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp index 9ab0e032ba..6b5678ac64 100644 --- a/src/herder/test/UpgradesTests.cpp +++ b/src/herder/test/UpgradesTests.cpp @@ -372,7 +372,7 @@ void testValidateUpgrades(VirtualClock::system_time_point preferredUpgradeDatetime, bool canBeValid) { - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = 10; cfg.TESTING_UPGRADE_DESIRED_FEE = 100; cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 50; @@ -630,7 +630,7 @@ TEST_CASE("Ledger Manager applies upgrades properly", "[upgrades]") TEST_CASE("config upgrade validation", "[upgrades]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); auto headerTime = VirtualClock::to_time_t(genesis(0, 2)); @@ -826,7 +826,7 @@ TEST_CASE("config upgrade validation", "[upgrades]") TEST_CASE("config upgrades applied to ledger", "[soroban][upgrades]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = static_cast(SOROBAN_PROTOCOL_VERSION) - 1; cfg.USE_CONFIG_FOR_GENESIS = false; @@ -2274,7 +2274,7 @@ TEST_CASE_VERSIONS("upgrade base reserve", "[upgrades]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); auto& lm = app->getLedgerManager(); @@ -2974,7 +2974,7 @@ TEST_CASE("upgrade from cpp14 serialized data", "[upgrades]") TEST_CASE("upgrades serialization roundtrip", "[upgrades]") { - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + 
auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); @@ -3058,7 +3058,7 @@ TEST_CASE("upgrades serialization roundtrip", "[upgrades]") TEST_CASE_VERSIONS("upgrade flags", "[upgrades][liquiditypool]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index 1f3bf185d8..b880f8ea69 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -520,7 +520,7 @@ TEST_CASE("History publish with restart", "[history][publish]") auto catchupSimulation = CatchupSimulation(VirtualClock::VIRTUAL_TIME, std::make_shared(), true, - Config::TESTDB_ON_DISK_SQLITE); + Config::TESTDB_BUCKET_DB_PERSISTENT); auto checkpointLedger = catchupSimulation.getLastCheckpointLedger(2); // Restart at various points in the checkpoint, core should continue @@ -570,7 +570,7 @@ TEST_CASE("History publish with restart", "[history][publish]") // Now catchup to ensure published checkpoints are valid auto app = catchupSimulation.createCatchupApplication( std::numeric_limits::max(), - Config::TESTDB_ON_DISK_SQLITE, "app"); + Config::TESTDB_BUCKET_DB_PERSISTENT, "app"); REQUIRE(catchupSimulation.catchupOffline(app, checkpointLedger)); } } @@ -720,10 +720,8 @@ dbModeName(Config::TestDbMode mode) { switch (mode) { - case Config::TESTDB_IN_MEMORY_OFFERS: - return "TESTDB_IN_MEMORY_OFFERS"; - case Config::TESTDB_ON_DISK_SQLITE: - return "TESTDB_ON_DISK_SQLITE"; + case Config::TESTDB_IN_MEMORY: + return "TESTDB_IN_MEMORY"; #ifdef USE_POSTGRES case Config::TESTDB_POSTGRESQL: return "TESTDB_POSTGRESQL"; @@ -856,7 +854,7 @@ TEST_CASE("History catchup with different modes", 60}; std::vector dbModes = { - Config::TESTDB_ON_DISK_SQLITE, Config::TESTDB_BUCKET_DB_PERSISTENT}; + 
Config::TESTDB_BUCKET_DB_PERSISTENT}; #ifdef USE_POSTGRES if (!force_sqlite) dbModes.push_back(Config::TESTDB_POSTGRESQL); @@ -1726,7 +1724,7 @@ TEST_CASE("Externalize gap while catchup work is running", "[history][catchup]") TEST_CASE("CheckpointBuilder", "[history][publish]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE); + auto cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT); TmpDirHistoryConfigurator().configure(cfg, true); auto app = createTestApplication(clock, cfg); diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h index 88453ef6a6..d3cbc4100b 100644 --- a/src/history/test/HistoryTestsUtils.h +++ b/src/history/test/HistoryTestsUtils.h @@ -217,7 +217,7 @@ class CatchupSimulation std::shared_ptr cg = std::make_shared(), bool startApp = true, - Config::TestDbMode dbMode = Config::TESTDB_IN_MEMORY_OFFERS); + Config::TestDbMode dbMode = Config::TESTDB_IN_MEMORY); ~CatchupSimulation(); Application& diff --git a/src/invariant/BucketListIsConsistentWithDatabase.cpp b/src/invariant/BucketListIsConsistentWithDatabase.cpp index 9f99e44afb..f1f3202e21 100644 --- a/src/invariant/BucketListIsConsistentWithDatabase.cpp +++ b/src/invariant/BucketListIsConsistentWithDatabase.cpp @@ -16,6 +16,7 @@ #include "main/Application.h" #include "main/PersistentState.h" #include "medida/timer.h" +#include "util/GlobalChecks.h" #include "util/XDRCereal.h" #include #include @@ -25,7 +26,9 @@ namespace stellar { -static std::string +namespace +{ +std::string checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerEntry const& entry) { auto fromDb = ltx.loadWithoutRecord(LedgerEntryKey(entry)); @@ -50,7 +53,7 @@ checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerEntry const& entry) } } -static std::string +std::string checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerKey const& key) { auto fromDb = ltx.loadWithoutRecord(key); @@ -64,6 +67,25 @@ checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerKey const& 
key) return s; } +std::string +checkDbEntryCounts(Application& app, LedgerRange const& range, + uint64_t expectedOfferCount) +{ + std::string msg; + auto& ltxRoot = app.getLedgerTxnRoot(); + uint64_t numInDb = ltxRoot.countObjects(OFFER, range); + if (numInDb != expectedOfferCount) + { + msg = fmt::format( + FMT_STRING("Incorrect OFFER count: Bucket = {:d} Database " + "= {:d}"), + expectedOfferCount, numInDb); + } + + return msg; +} +} + std::shared_ptr BucketListIsConsistentWithDatabase::registerInvariant(Application& app) { @@ -83,103 +105,6 @@ BucketListIsConsistentWithDatabase::getName() const return "BucketListIsConsistentWithDatabase"; } -struct EntryCounts -{ - uint64_t mAccounts{0}; - uint64_t mTrustLines{0}; - uint64_t mOffers{0}; - uint64_t mData{0}; - uint64_t mClaimableBalance{0}; - uint64_t mLiquidityPool{0}; - uint64_t mContractData{0}; - uint64_t mContractCode{0}; - uint64_t mConfigSettings{0}; - uint64_t mTTL{0}; - - uint64_t - totalEntries() const - { - return mAccounts + mTrustLines + mOffers + mData + mClaimableBalance + - mLiquidityPool + mContractData + mConfigSettings + mTTL; - } - - void - countLiveEntry(LedgerEntry const& e) - { - switch (e.data.type()) - { - case ACCOUNT: - ++mAccounts; - break; - case TRUSTLINE: - ++mTrustLines; - break; - case OFFER: - ++mOffers; - break; - case DATA: - ++mData; - break; - case CLAIMABLE_BALANCE: - ++mClaimableBalance; - break; - case LIQUIDITY_POOL: - ++mLiquidityPool; - break; - case CONTRACT_DATA: - ++mContractData; - break; - case CONTRACT_CODE: - ++mContractCode; - break; - case CONFIG_SETTING: - ++mConfigSettings; - break; - case TTL: - ++mTTL; - break; - default: - throw std::runtime_error( - fmt::format(FMT_STRING("unknown ledger entry type: {:d}"), - static_cast(e.data.type()))); - } - } - - std::string - checkDbEntryCounts(Application& app, LedgerRange const& range, - std::function entryTypeFilter) - { - std::string msg; - auto check = [&](LedgerEntryType let, uint64_t numInBucket) { - if 
(entryTypeFilter(let)) - { - auto& ltxRoot = app.getLedgerTxnRoot(); - uint64_t numInDb = ltxRoot.countObjects(let, range); - if (numInDb != numInBucket) - { - msg = fmt::format( - FMT_STRING("Incorrect {} count: Bucket = {:d} Database " - "= {:d}"), - xdr::xdr_traits::enum_name(let), - numInBucket, numInDb); - return false; - } - } - return true; - }; - - // Uses short-circuiting to make this compact - check(ACCOUNT, mAccounts) && check(TRUSTLINE, mTrustLines) && - check(OFFER, mOffers) && check(DATA, mData) && - check(CLAIMABLE_BALANCE, mClaimableBalance) && - check(LIQUIDITY_POOL, mLiquidityPool) && - check(CONTRACT_DATA, mContractData) && - check(CONTRACT_CODE, mContractCode) && - check(CONFIG_SETTING, mConfigSettings) && check(TTL, mTTL); - return msg; - } -}; - void BucketListIsConsistentWithDatabase::checkEntireBucketlist() { @@ -188,29 +113,29 @@ BucketListIsConsistentWithDatabase::checkEntireBucketlist() HistoryArchiveState has = lm.getLastClosedLedgerHAS(); std::map bucketLedgerMap = bm.loadCompleteLedgerState(has); - EntryCounts counts; + uint64_t offerCount = 0; medida::Timer timer(std::chrono::microseconds(1)); { LedgerTxn ltx(mApp.getLedgerTxnRoot()); for (auto const& pair : bucketLedgerMap) { - // Don't check entry types in BucketListDB when enabled - if (mApp.getConfig().isUsingBucketListDB() && - !BucketIndex::typeNotSupported(pair.first.type())) + // Don't check entry types supported by BucketListDB, since they + // won't exist in SQL + if (!BucketIndex::typeNotSupported(pair.first.type())) { continue; } - counts.countLiveEntry(pair.second); + ++offerCount; std::string s; timer.Time([&]() { s = checkAgainstDatabase(ltx, pair.second); }); if (!s.empty()) { throw std::runtime_error(s); } - auto i = counts.totalEntries(); - if ((i & 0x7ffff) == 0) + + if ((offerCount & 0x7ffff) == 0) { using namespace std::chrono; nanoseconds ns = timer.duration_unit() * @@ -219,56 +144,33 @@ BucketListIsConsistentWithDatabase::checkEntireBucketlist() 
CLOG_INFO(Ledger, "Checked bucket-vs-DB consistency for " "{} entries (mean {}/entry)", - i, us); + offerCount, us); } } } - // Count functionality does not support in-memory LedgerTxn - if (!mApp.getConfig().isInMemoryMode()) - { - auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ, - has.currentLedger); + auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ, + has.currentLedger); - // If BucketListDB enabled, only types not supported by BucketListDB - // should be in SQL DB - std::function filter; - if (mApp.getConfig().isUsingBucketListDB()) - { - filter = BucketIndex::typeNotSupported; - } - else - { - filter = [](LedgerEntryType) { return true; }; - } - - auto s = counts.checkDbEntryCounts(mApp, range, filter); - if (!s.empty()) - { - throw std::runtime_error(s); - } + auto s = checkDbEntryCounts(mApp, range, offerCount); + if (!s.empty()) + { + throw std::runtime_error(s); } - if (mApp.getConfig().isUsingBucketListDB() && - mApp.getPersistentState().getState(PersistentState::kDBBackend) != - BucketIndex::DB_BACKEND_STATE) + if (mApp.getPersistentState().getState(PersistentState::kDBBackend) != + BucketIndex::DB_BACKEND_STATE) { - throw std::runtime_error("BucketListDB enabled but BucketListDB flag " - "not set in PersistentState."); + throw std::runtime_error( + "Corrupt DB: BucketListDB flag " + "not set in PersistentState. 
Please run new-db or upgrade-db"); } } std::string BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) { - // If BucketListDB is disabled, we've already enforced the invariant on a - // per-Bucket level - if (!mApp.getConfig().isUsingBucketListDB()) - { - return {}; - } - - EntryCounts counts; + uint64_t offerCount = 0; LedgerKeySet seenKeys; auto perBucketCheck = [&](auto bucket, auto& ltx) { @@ -289,8 +191,7 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) auto [_, newKey] = seenKeys.emplace(key); if (newKey) { - counts.countLiveEntry(e.liveEntry()); - + ++offerCount; auto s = checkAgainstDatabase(ltx, e.liveEntry()); if (!s.empty()) { @@ -343,17 +244,15 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ, newestLedger); - // SQL only stores offers when BucketListDB is enabled - return counts.checkDbEntryCounts( - mApp, range, [](LedgerEntryType let) { return let == OFFER; }); + return checkDbEntryCounts(mApp, range, offerCount); } std::string BucketListIsConsistentWithDatabase::checkOnBucketApply( std::shared_ptr bucket, uint32_t oldestLedger, - uint32_t newestLedger, std::function entryTypeFilter) + uint32_t newestLedger, std::unordered_set const& shadowedKeys) { - EntryCounts counts; + uint64_t offerCount = 0; { LedgerTxn ltx(mApp.getLedgerTxnRoot()); @@ -394,28 +293,25 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply( return s; } - if (entryTypeFilter(e.liveEntry().data.type())) + // Don't check DB against keys shadowed by earlier Buckets + if (BucketIndex::typeNotSupported(e.liveEntry().data.type()) && + shadowedKeys.find(LedgerEntryKey(e.liveEntry())) == + shadowedKeys.end()) { - counts.countLiveEntry(e.liveEntry()); - - // BucketListDB is not compatible with per-Bucket database - // consistency checks - if (!mApp.getConfig().isUsingBucketListDB()) + ++offerCount; + auto s = 
checkAgainstDatabase(ltx, e.liveEntry()); + if (!s.empty()) { - auto s = checkAgainstDatabase(ltx, e.liveEntry()); - if (!s.empty()) - { - return s; - } + return s; } } } - else if (e.type() == DEADENTRY) + else { - // BucketListDB is not compatible with per-Bucket database - // consistency checks - if (entryTypeFilter(e.deadEntry().type()) && - !mApp.getConfig().isUsingBucketListDB()) + // Only check for OFFER keys that are not shadowed by an earlier + // bucket + if (BucketIndex::typeNotSupported(e.deadEntry().type()) && + shadowedKeys.find(e.deadEntry()) == shadowedKeys.end()) { auto s = checkAgainstDatabase(ltx, e.deadEntry()); if (!s.empty()) @@ -428,13 +324,6 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply( } auto range = LedgerRange::inclusive(oldestLedger, newestLedger); - - // BucketListDB not compatible with per-Bucket database consistency checks - if (!mApp.getConfig().isUsingBucketListDB()) - { - return counts.checkDbEntryCounts(mApp, range, entryTypeFilter); - } - - return std::string{}; + return checkDbEntryCounts(mApp, range, offerCount); } } diff --git a/src/invariant/BucketListIsConsistentWithDatabase.h b/src/invariant/BucketListIsConsistentWithDatabase.h index 36b5a71559..a9bb3003ac 100644 --- a/src/invariant/BucketListIsConsistentWithDatabase.h +++ b/src/invariant/BucketListIsConsistentWithDatabase.h @@ -36,7 +36,7 @@ class BucketListIsConsistentWithDatabase : public Invariant virtual std::string checkOnBucketApply( std::shared_ptr bucket, uint32_t oldestLedger, uint32_t newestLedger, - std::function entryTypeFilter) override; + std::unordered_set const& shadowedKeys) override; virtual std::string checkAfterAssumeState(uint32_t newestLedger) override; diff --git a/src/invariant/Invariant.h b/src/invariant/Invariant.h index 8a2a12ec04..6a90105477 100644 --- a/src/invariant/Invariant.h +++ b/src/invariant/Invariant.h @@ -8,6 +8,7 @@ #include #include #include +#include namespace stellar { @@ -17,6 +18,7 @@ enum LedgerEntryType : 
std::int32_t; struct LedgerTxnDelta; struct Operation; struct OperationResult; +struct LedgerKey; // NOTE: The checkOn* functions should have a default implementation so that // more can be added in the future without requiring changes to all @@ -45,7 +47,7 @@ class Invariant virtual std::string checkOnBucketApply(std::shared_ptr bucket, uint32_t oldestLedger, uint32_t newestLedger, - std::function entryTypeFilter) + std::unordered_set const& shadowedKeys) { return std::string{}; } diff --git a/src/invariant/InvariantManager.h b/src/invariant/InvariantManager.h index 220209f1c7..61575fcd49 100644 --- a/src/invariant/InvariantManager.h +++ b/src/invariant/InvariantManager.h @@ -35,11 +35,12 @@ class InvariantManager virtual Json::Value getJsonInfo() = 0; virtual std::vector getEnabledInvariants() const = 0; + virtual bool isBucketApplyInvariantEnabled() const = 0; - virtual void checkOnBucketApply( - std::shared_ptr bucket, uint32_t ledger, - uint32_t level, bool isCurr, - std::function entryTypeFilter) = 0; + virtual void + checkOnBucketApply(std::shared_ptr bucket, + uint32_t ledger, uint32_t level, bool isCurr, + std::unordered_set const& shadowedKeys) = 0; virtual void checkAfterAssumeState(uint32_t newestLedger) = 0; diff --git a/src/invariant/InvariantManagerImpl.cpp b/src/invariant/InvariantManagerImpl.cpp index d20177f1f6..1a0a855217 100644 --- a/src/invariant/InvariantManagerImpl.cpp +++ b/src/invariant/InvariantManagerImpl.cpp @@ -69,10 +69,18 @@ InvariantManagerImpl::getEnabledInvariants() const return res; } +bool +InvariantManagerImpl::isBucketApplyInvariantEnabled() const +{ + return std::any_of(mEnabled.begin(), mEnabled.end(), [](auto const& inv) { + return inv->getName() == "BucketListIsConsistentWithDatabase"; + }); +} + void InvariantManagerImpl::checkOnBucketApply( std::shared_ptr bucket, uint32_t ledger, uint32_t level, - bool isCurr, std::function entryTypeFilter) + bool isCurr, std::unordered_set const& shadowedKeys) { uint32_t oldestLedger = 
isCurr ? LiveBucketList::oldestLedgerInCurr(ledger, level) @@ -83,8 +91,8 @@ InvariantManagerImpl::checkOnBucketApply( : LiveBucketList::sizeOfSnap(ledger, level)); for (auto invariant : mEnabled) { - auto result = invariant->checkOnBucketApply( - bucket, oldestLedger, newestLedger, entryTypeFilter); + auto result = invariant->checkOnBucketApply(bucket, oldestLedger, + newestLedger, shadowedKeys); if (result.empty()) { continue; diff --git a/src/invariant/InvariantManagerImpl.h b/src/invariant/InvariantManagerImpl.h index 689ab6b750..fbbb35fee8 100644 --- a/src/invariant/InvariantManagerImpl.h +++ b/src/invariant/InvariantManagerImpl.h @@ -36,6 +36,7 @@ class InvariantManagerImpl : public InvariantManager virtual Json::Value getJsonInfo() override; virtual std::vector getEnabledInvariants() const override; + bool isBucketApplyInvariantEnabled() const override; virtual void checkOnOperationApply(Operation const& operation, OperationResult const& opres, @@ -44,7 +45,7 @@ class InvariantManagerImpl : public InvariantManager virtual void checkOnBucketApply( std::shared_ptr bucket, uint32_t ledger, uint32_t level, bool isCurr, - std::function entryTypeFilter) override; + std::unordered_set const& shadowedKeys) override; virtual void checkAfterAssumeState(uint32_t newestLedger) override; diff --git a/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp b/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp index acc308be6b..20892ad29e 100644 --- a/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp +++ b/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp @@ -292,7 +292,7 @@ deleteRandomSubEntryFromAccount(Application& app, LedgerEntry& le, TEST_CASE("Create account with no subentries", "[invariant][accountsubentriescount]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"AccountSubEntriesCountIsValid"}; VirtualClock clock; Application::pointer 
app = createTestApplication(clock, cfg); @@ -309,7 +309,7 @@ TEST_CASE("Create account then add signers and subentries", "[invariant][accountsubentriescount]") { stellar::uniform_int_distribution changesDist(-1, 2); - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"AccountSubEntriesCountIsValid"}; for (uint32_t i = 0; i < 50; ++i) diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp index 7a2a1f6b62..eabec762fa 100644 --- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp +++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp @@ -20,6 +20,7 @@ #include "test/test.h" #include "transactions/TransactionUtils.h" #include "util/Decoder.h" +#include "util/GlobalChecks.h" #include "util/Math.h" #include "util/UnorderedSet.h" #include "util/XDROperators.h" @@ -43,44 +44,10 @@ struct BucketListGenerator public: BucketListGenerator() : mLedgerSeq(1) { - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(); cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true; cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1; mAppGenerate = createTestApplication(mClock, cfg); - - auto skey = SecretKey::fromSeed(mAppGenerate->getNetworkID()); - LedgerKey key(ACCOUNT); - key.account().accountID = skey.getPublicKey(); - mLiveKeys.insert(key); - - if (appProtocolVersionStartsFrom(*mAppGenerate, - SOROBAN_PROTOCOL_VERSION)) - { - // All config settings entries will be created automatically during - // the protocol upgrade and NOT generated by tests, so they should - // be reflected in the live key set. This allows tests to still run - // on those entries. - for (auto t : xdr::xdr_traits::enum_values()) - { -#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION - // This setting has been introduced in the vnext xdr, but it's - // not used in code yet. 
This check can be replaced with a - // runtime protocol check once we create the setting in the - // upgrade path. - if (static_cast(t) == - ConfigSettingID:: - CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0) - { - continue; - } -#endif - LedgerKey ckey(CONFIG_SETTING); - ckey.configSetting().configSettingID = - static_cast(t); - mLiveKeys.insert(ckey); - } - } - LedgerTxn ltx(mAppGenerate->getLedgerTxnRoot(), false); REQUIRE(mLedgerSeq == ltx.loadHeader().current().ledgerSeq); } @@ -102,8 +69,8 @@ struct BucketListGenerator applyBuckets(Args&&... args) { VirtualClock clock; - Application::pointer app = createTestApplication( - clock, getTestConfig(1, Config::TESTDB_IN_MEMORY_OFFERS)); + Application::pointer app = + createTestApplication(clock, getTestConfig(1)); applyBuckets(app, std::forward(args)...); } @@ -164,8 +131,8 @@ struct BucketListGenerator generateLiveEntries(AbstractLedgerTxn& ltx) { auto entries = - LedgerTestUtils::generateValidLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 5); + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes({OFFER}, + 5); for (auto& le : entries) { le.lastModifiedLedgerSeq = mLedgerSeq; @@ -176,12 +143,7 @@ struct BucketListGenerator virtual std::vector generateDeadEntries(AbstractLedgerTxn& ltx) { - UnorderedSet liveDeletable(mLiveKeys.size()); - std::copy_if( - mLiveKeys.begin(), mLiveKeys.end(), - std::inserter(liveDeletable, liveDeletable.end()), - [](LedgerKey const& key) { return key.type() != CONFIG_SETTING; }); - + UnorderedSet liveDeletable = mLiveKeys; std::vector dead; while (dead.size() < 2 && !liveDeletable.empty()) { @@ -226,8 +188,7 @@ struct BucketListGenerator out.put(*in); } - auto bucket = - out.getBucket(bmApply, /*shouldSynchronouslyIndex=*/false); + auto bucket = out.getBucket(bmApply, false); }; writeBucketFile(level.getCurr()); writeBucketFile(level.getSnap()); @@ -280,11 +241,10 @@ doesBucketListContain(LiveBucketList& bl, const BucketEntry& be) struct SelectBucketListGenerator : public 
BucketListGenerator { uint32_t const mSelectLedger; - LedgerEntryType const mType; std::shared_ptr mSelected; - SelectBucketListGenerator(uint32_t selectLedger, LedgerEntryType type) - : mSelectLedger(selectLedger), mType(type) + SelectBucketListGenerator(uint32_t selectLedger) + : mSelectLedger(selectLedger) { } @@ -293,24 +253,35 @@ struct SelectBucketListGenerator : public BucketListGenerator { if (mLedgerSeq == mSelectLedger) { - UnorderedSet filteredKeys(mLiveKeys.size()); - std::copy_if( - mLiveKeys.begin(), mLiveKeys.end(), - std::inserter(filteredKeys, filteredKeys.end()), - [this](LedgerKey const& key) { return key.type() == mType; }); - - if (!filteredKeys.empty()) + if (!mLiveKeys.empty()) { stellar::uniform_int_distribution dist( - 0, filteredKeys.size() - 1); - auto iter = filteredKeys.begin(); + 0, mLiveKeys.size() - 1); + auto iter = mLiveKeys.begin(); std::advance(iter, dist(gRandomEngine)); mSelected = std::make_shared( ltx.loadWithoutRecord(*iter).current()); } } - return BucketListGenerator::generateLiveEntries(ltx); + + auto live = BucketListGenerator::generateLiveEntries(ltx); + + // Selected entry must not be shadowed + if (mSelected) + { + auto key = LedgerEntryKey(*mSelected); + for (size_t i = 0; i < live.size(); ++i) + { + if (LedgerEntryKey(live.at(i)) == key) + { + live.erase(live.begin() + i); + break; + } + } + } + + return live; } virtual std::vector @@ -341,8 +312,8 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork Application& app, std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion, - std::function filter, LedgerEntry const& entry) - : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion, filter) + LedgerEntry const& entry) + : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion) , mEntry(entry) , mAdded{false} { @@ -358,13 +329,8 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork uint32_t maxLedger = std::numeric_limits::max() - 1; auto& ltxRoot = 
mApp.getLedgerTxnRoot(); - size_t count = 0; - for (auto let : xdr::xdr_traits::enum_values()) - { - count += ltxRoot.countObjects( - static_cast(let), - LedgerRange::inclusive(minLedger, maxLedger)); - } + auto count = ltxRoot.countObjects( + OFFER, LedgerRange::inclusive(minLedger, maxLedger)); if (count > 0) { @@ -433,26 +399,6 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork LedgerEntry const mEntry; bool mModified; - void - modifyAccountEntry(LedgerEntry& entry) - { - AccountEntry const& account = mEntry.data.account(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - entry.data.account() = LedgerTestUtils::generateValidAccountEntry(5); - entry.data.account().accountID = account.accountID; - } - - void - modifyTrustLineEntry(LedgerEntry& entry) - { - TrustLineEntry const& trustLine = mEntry.data.trustLine(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - entry.data.trustLine() = - LedgerTestUtils::generateValidTrustLineEntry(5); - entry.data.trustLine().accountID = trustLine.accountID; - entry.data.trustLine().asset = trustLine.asset; - } - void modifyOfferEntry(LedgerEntry& entry) { @@ -463,90 +409,6 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork entry.data.offer().offerID = offer.offerID; } - void - modifyDataEntry(LedgerEntry& entry) - { - DataEntry const& data = mEntry.data.data(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - do - { - entry.data.data() = LedgerTestUtils::generateValidDataEntry(5); - } while (entry.data.data().dataValue == data.dataValue); - entry.data.data().accountID = data.accountID; - entry.data.data().dataName = data.dataName; - } - - void - modifyClaimableBalanceEntry(LedgerEntry& entry) - { - ClaimableBalanceEntry const& cb = mEntry.data.claimableBalance(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - entry.data.claimableBalance() = - LedgerTestUtils::generateValidClaimableBalanceEntry(5); - - entry.data.claimableBalance().balanceID = 
cb.balanceID; - } - - void - modifyLiquidityPoolEntry(LedgerEntry& entry) - { - LiquidityPoolEntry const& lp = mEntry.data.liquidityPool(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - entry.data.liquidityPool() = - LedgerTestUtils::generateValidLiquidityPoolEntry(5); - - entry.data.liquidityPool().liquidityPoolID = lp.liquidityPoolID; - } - - void - modifyConfigSettingEntry(LedgerEntry& entry) - { - ConfigSettingEntry const& cfg = mEntry.data.configSetting(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - entry.data.configSetting() = - LedgerTestUtils::generateValidConfigSettingEntry(5); - - entry.data.configSetting().configSettingID(cfg.configSettingID()); - } - - void - modifyContractDataEntry(LedgerEntry& entry) - { - ContractDataEntry const& cd = mEntry.data.contractData(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - entry.data.contractData() = - LedgerTestUtils::generateValidContractDataEntry(5); - - entry.data.contractData().contract = cd.contract; - entry.data.contractData().key = cd.key; - } - - void - modifyContractCodeEntry(LedgerEntry& entry) - { - ContractCodeEntry const& cc = mEntry.data.contractCode(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - - while (entry.data.contractCode().code == - mEntry.data.contractCode().code) - { - entry.data.contractCode() = - LedgerTestUtils::generateValidContractCodeEntry(5); - } - - entry.data.contractCode().hash = cc.hash; - } - - void - modifyTTLEntry(LedgerEntry& entry) - { - TTLEntry const& ee = mEntry.data.ttl(); - entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq; - entry.data.ttl() = LedgerTestUtils::generateValidTTLEntry(5); - - entry.data.ttl().keyHash = ee.keyHash; - } - public: ApplyBucketsWorkModifyEntry( Application& app, @@ -569,41 +431,10 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork auto entry = ltx.load(mKey); while (entry && entry.current() == mEntry) { - switch (mEntry.data.type()) - { - case 
ACCOUNT: - modifyAccountEntry(entry.current()); - break; - case TRUSTLINE: - modifyTrustLineEntry(entry.current()); - break; - case OFFER: - modifyOfferEntry(entry.current()); - break; - case DATA: - modifyDataEntry(entry.current()); - break; - case CLAIMABLE_BALANCE: - modifyClaimableBalanceEntry(entry.current()); - break; - case LIQUIDITY_POOL: - modifyLiquidityPoolEntry(entry.current()); - break; - case CONFIG_SETTING: - modifyConfigSettingEntry(entry.current()); - break; - case CONTRACT_DATA: - modifyContractDataEntry(entry.current()); - break; - case CONTRACT_CODE: - modifyContractCodeEntry(entry.current()); - break; - case TTL: - modifyTTLEntry(entry.current()); - break; - default: - REQUIRE(false); - } + releaseAssert( + BucketIndex::typeNotSupported(mEntry.data.type())); + + modifyOfferEntry(entry.current()); mModified = true; } @@ -655,168 +486,61 @@ TEST_CASE("BucketListIsConsistentWithDatabase empty ledgers", REQUIRE_NOTHROW(blg.applyBuckets()); } -TEST_CASE("BucketListIsConsistentWithDatabase test root account", - "[invariant][bucketlistconsistent]") -{ - struct TestRootBucketListGenerator : public BucketListGenerator - { - uint32_t const mTargetLedger; - bool mModifiedRoot; - - TestRootBucketListGenerator() - : mTargetLedger(stellar::uniform_int_distribution(2, 100)( - gRandomEngine)) - , mModifiedRoot(false) - { - } - - virtual std::vector - generateLiveEntries(AbstractLedgerTxn& ltx) - { - if (mLedgerSeq == mTargetLedger) - { - mModifiedRoot = true; - auto& app = mAppGenerate; - auto skey = SecretKey::fromSeed(app->getNetworkID()); - auto root = skey.getPublicKey(); - auto le = - stellar::loadAccountWithoutRecord(ltx, root).current(); - le.lastModifiedLedgerSeq = mLedgerSeq; - return {le}; - } - else - { - return BucketListGenerator::generateLiveEntries(ltx); - } - } - - virtual std::vector - generateDeadEntries(AbstractLedgerTxn& ltx) - { - return {}; - } - }; - - for (size_t j = 0; j < 5; ++j) - { - TestRootBucketListGenerator blg; - 
blg.generateLedgers(100); - REQUIRE(blg.mModifiedRoot); - REQUIRE_NOTHROW(blg.applyBuckets()); - } -} - TEST_CASE("BucketListIsConsistentWithDatabase added entries", "[invariant][bucketlistconsistent][acceptance]") { - auto runTest = [](bool withFilter) { - for (size_t nTests = 0; nTests < 40; ++nTests) - { - BucketListGenerator blg; - blg.generateLedgers(100); - - stellar::uniform_int_distribution addAtLedgerDist( - 2, blg.mLedgerSeq); - auto le = LedgerTestUtils::generateValidLedgerEntryWithExclusions( - {CONFIG_SETTING}, 5); - le.lastModifiedLedgerSeq = addAtLedgerDist(gRandomEngine); - - if (!withFilter) - { - auto filter = [](auto) { return true; }; - if (le.data.type() == CONFIG_SETTING) - { - // Config settings would have a duplicate key due to low key - // space. - REQUIRE_THROWS_AS( - blg.applyBuckets(filter, le), - std::runtime_error); - } - else - { - REQUIRE_THROWS_AS( - blg.applyBuckets(filter, le), - InvariantDoesNotHold); - } - } - else - { - auto filter = [&](auto let) { return let != le.data.type(); }; - REQUIRE_NOTHROW( - blg.applyBuckets(filter, le)); - } - } - }; - - runTest(true); + for (size_t nTests = 0; nTests < 40; ++nTests) + { + BucketListGenerator blg; + blg.generateLedgers(100); - // This tests the filtering behavior of BucketListIsConsistentWithDatabase - // because the bucket apply will not add anything of the specified - // LedgerEntryType, but we will inject an additional LedgerEntry of that - // type anyway. But it shouldn't throw because the invariant isn't looking - // for those changes. 
- runTest(false); + stellar::uniform_int_distribution addAtLedgerDist( + 2, blg.mLedgerSeq); + auto le = + LedgerTestUtils::generateValidLedgerEntryWithTypes({OFFER}, 10); + le.lastModifiedLedgerSeq = addAtLedgerDist(gRandomEngine); + REQUIRE_THROWS_AS(blg.applyBuckets(le), + InvariantDoesNotHold); + } } TEST_CASE("BucketListIsConsistentWithDatabase deleted entries", "[invariant][bucketlistconsistent][acceptance]") { - for (auto t : xdr::xdr_traits::enum_values()) + size_t nTests = 0; + while (nTests < 10) { - size_t nTests = 0; - while (nTests < 10) + SelectBucketListGenerator blg(100); + blg.generateLedgers(100); + if (!blg.mSelected) { - SelectBucketListGenerator blg(100, static_cast(t)); - blg.generateLedgers(100); - if (!blg.mSelected) - { - continue; - } - if (t == CONFIG_SETTING) - { - // Configuration can not be deleted. - REQUIRE_THROWS_AS(blg.applyBuckets( - *blg.mSelected), - std::runtime_error); - } - else - { - REQUIRE_THROWS_AS(blg.applyBuckets( - *blg.mSelected), - InvariantDoesNotHold); - } - ++nTests; + continue; } + + REQUIRE_THROWS_AS( + blg.applyBuckets(*blg.mSelected), + InvariantDoesNotHold); + ++nTests; } } TEST_CASE("BucketListIsConsistentWithDatabase modified entries", "[invariant][bucketlistconsistent][acceptance]") { - for (auto t : xdr::xdr_traits::enum_values()) + size_t nTests = 0; + while (nTests < 10) { - // Skip CONFIG_SETTING for now because the test modification test does - // not work unless blg itself updates the entry. 
- if (t == CONFIG_SETTING) + SelectBucketListGenerator blg(100); + blg.generateLedgers(100); + if (!blg.mSelected) { continue; } - size_t nTests = 0; - while (nTests < 10) - { - SelectBucketListGenerator blg(100, static_cast(t)); - blg.generateLedgers(100); - if (!blg.mSelected) - { - continue; - } - - REQUIRE_THROWS_AS( - blg.applyBuckets(*blg.mSelected), - InvariantDoesNotHold); - ++nTests; - } + REQUIRE_THROWS_AS( + blg.applyBuckets(*blg.mSelected), + InvariantDoesNotHold); + ++nTests; } } @@ -916,8 +640,8 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY", { uint32_t const mTargetLedger; - MergeBucketListGenerator(LedgerEntryType let) - : SelectBucketListGenerator(25, let), mTargetLedger(110) + MergeBucketListGenerator() + : SelectBucketListGenerator(25), mTargetLedger(110) { } @@ -940,73 +664,61 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY", return (bool)ltx.load(LedgerEntryKey(le)); }; - auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(1); cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true; cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1; testutil::BucketListDepthModifier bldm(3); - for (auto t : xdr::xdr_traits::enum_values()) + uint32_t nTests = 0; + while (nTests < 5) { - if (t == CONFIG_SETTING) + MergeBucketListGenerator blg; + auto& blGenerate = + blg.mAppGenerate->getBucketManager().getLiveBucketList(); + + blg.generateLedgers(100); + if (!blg.mSelected) { - // Merge logic is not applicable to configuration. 
continue; } - uint32_t nTests = 0; - while (nTests < 5) - { - MergeBucketListGenerator blg(static_cast(t)); - auto& blGenerate = - blg.mAppGenerate->getBucketManager().getLiveBucketList(); - - blg.generateLedgers(100); - if (!blg.mSelected) - { - continue; - } - - BucketEntry dead(DEADENTRY); - dead.deadEntry() = LedgerEntryKey(*blg.mSelected); - BucketEntry live(LIVEENTRY); - live.liveEntry() = *blg.mSelected; - BucketEntry init(INITENTRY); - init.liveEntry() = *blg.mSelected; - - { - VirtualClock clock; - Application::pointer appApply = - createTestApplication(clock, cfg); - REQUIRE_NOTHROW(blg.applyBuckets(appApply)); - REQUIRE(exists(*blg.mAppGenerate, *blg.mSelected)); - REQUIRE(exists(*appApply, *blg.mSelected)); - } + BucketEntry dead(DEADENTRY); + dead.deadEntry() = LedgerEntryKey(*blg.mSelected); + BucketEntry live(LIVEENTRY); + live.liveEntry() = *blg.mSelected; + BucketEntry init(INITENTRY); + init.liveEntry() = *blg.mSelected; - blg.generateLedgers(10); - REQUIRE(doesBucketListContain(blGenerate, dead)); - REQUIRE((doesBucketListContain(blGenerate, live) || - doesBucketListContain(blGenerate, init))); + { + VirtualClock clock; + Application::pointer appApply = createTestApplication(clock, cfg); + REQUIRE_NOTHROW(blg.applyBuckets(appApply)); + REQUIRE(exists(*blg.mAppGenerate, *blg.mSelected)); + REQUIRE(exists(*appApply, *blg.mSelected)); + } - blg.generateLedgers(100); - REQUIRE(!doesBucketListContain(blGenerate, dead)); - REQUIRE(!(doesBucketListContain(blGenerate, live) || - doesBucketListContain(blGenerate, init))); - REQUIRE(!exists(*blg.mAppGenerate, *blg.mSelected)); + blg.generateLedgers(10); + REQUIRE(doesBucketListContain(blGenerate, dead)); + REQUIRE((doesBucketListContain(blGenerate, live) || + doesBucketListContain(blGenerate, init))); - { - VirtualClock clock; - Application::pointer appApply = - createTestApplication(clock, cfg); - REQUIRE_NOTHROW(blg.applyBuckets(appApply)); - auto& blApply = - 
appApply->getBucketManager().getLiveBucketList(); - REQUIRE(!doesBucketListContain(blApply, dead)); - REQUIRE(!(doesBucketListContain(blApply, live) || - doesBucketListContain(blApply, init))); - REQUIRE(!exists(*appApply, *blg.mSelected)); - } + blg.generateLedgers(100); + REQUIRE(!doesBucketListContain(blGenerate, dead)); + REQUIRE(!(doesBucketListContain(blGenerate, live) || + doesBucketListContain(blGenerate, init))); + REQUIRE(!exists(*blg.mAppGenerate, *blg.mSelected)); - ++nTests; + { + VirtualClock clock; + Application::pointer appApply = createTestApplication(clock, cfg); + REQUIRE_NOTHROW(blg.applyBuckets(appApply)); + auto& blApply = appApply->getBucketManager().getLiveBucketList(); + REQUIRE(!doesBucketListContain(blApply, dead)); + REQUIRE(!(doesBucketListContain(blApply, live) || + doesBucketListContain(blApply, init))); + REQUIRE(!exists(*appApply, *blg.mSelected)); } + + ++nTests; } } diff --git a/src/invariant/test/ConservationOfLumensTests.cpp b/src/invariant/test/ConservationOfLumensTests.cpp index 6b91b127b0..e5686c70ef 100644 --- a/src/invariant/test/ConservationOfLumensTests.cpp +++ b/src/invariant/test/ConservationOfLumensTests.cpp @@ -153,7 +153,7 @@ TEST_CASE("Fee pool change without inflation", TEST_CASE("Account balances changed without inflation", "[invariant][conservationoflumens]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"ConservationOfLumens"}; uint32_t const N = 10; @@ -187,7 +187,7 @@ TEST_CASE("Account balances changed without inflation", TEST_CASE("Account balances unchanged without inflation", "[invariant][conservationoflumens]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"ConservationOfLumens"}; uint32_t const N = 10; @@ -228,7 +228,7 @@ TEST_CASE("Account balances unchanged without inflation", 
TEST_CASE("Inflation changes are consistent", "[invariant][conservationoflumens]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"ConservationOfLumens"}; stellar::uniform_int_distribution payoutsDist(1, 100); stellar::uniform_int_distribution amountDist(1, 100000); diff --git a/src/invariant/test/InvariantTests.cpp b/src/invariant/test/InvariantTests.cpp index 448dedf7ca..ab78a375fb 100644 --- a/src/invariant/test/InvariantTests.cpp +++ b/src/invariant/test/InvariantTests.cpp @@ -53,9 +53,10 @@ class TestInvariant : public Invariant } virtual std::string - checkOnBucketApply(std::shared_ptr bucket, - uint32_t oldestLedger, uint32_t newestLedger, - std::function filter) override + checkOnBucketApply( + std::shared_ptr bucket, uint32_t oldestLedger, + uint32_t newestLedger, + std::unordered_set const& shadowedKeys) override { return mShouldFail ? "fail" : ""; } @@ -167,10 +168,9 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]") uint32_t ledger = 1; uint32_t level = 0; bool isCurr = true; - REQUIRE_THROWS_AS( - app->getInvariantManager().checkOnBucketApply( - bucket, ledger, level, isCurr, [](auto) { return true; }), - InvariantDoesNotHold); + REQUIRE_THROWS_AS(app->getInvariantManager().checkOnBucketApply( + bucket, ledger, level, isCurr, {}), + InvariantDoesNotHold); } { @@ -188,7 +188,7 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]") uint32_t level = 0; bool isCurr = true; REQUIRE_NOTHROW(app->getInvariantManager().checkOnBucketApply( - bucket, ledger, level, isCurr, [](auto) { return true; })); + bucket, ledger, level, isCurr, {})); } } diff --git a/src/invariant/test/LedgerEntryIsValidTests.cpp b/src/invariant/test/LedgerEntryIsValidTests.cpp index 4d946183ee..082066e6e7 100644 --- a/src/invariant/test/LedgerEntryIsValidTests.cpp +++ b/src/invariant/test/LedgerEntryIsValidTests.cpp @@ -19,7 +19,7 @@ using namespace 
stellar::InvariantTestUtils; TEST_CASE("Trigger validity check for each entry type", "[invariant][ledgerentryisvalid]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LedgerEntryIsValid"}; VirtualClock clock; @@ -67,7 +67,7 @@ TEST_CASE("Trigger validity check for each entry type", TEST_CASE("Modify ClaimableBalanceEntry", "[invariant][ledgerentryisvalid][claimablebalance]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LedgerEntryIsValid"}; VirtualClock clock; diff --git a/src/invariant/test/LiabilitiesMatchOffersTests.cpp b/src/invariant/test/LiabilitiesMatchOffersTests.cpp index 1c95224341..c4de34c9c0 100644 --- a/src/invariant/test/LiabilitiesMatchOffersTests.cpp +++ b/src/invariant/test/LiabilitiesMatchOffersTests.cpp @@ -58,7 +58,7 @@ updateAccountWithRandomBalance(LedgerEntry le, Application& app, TEST_CASE("Create account above minimum balance", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -75,7 +75,7 @@ TEST_CASE("Create account above minimum balance", TEST_CASE("Create account below minimum balance", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -92,7 +92,7 @@ TEST_CASE("Create account below minimum balance", TEST_CASE("Create account then decrease balance below minimum", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, 
Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -111,7 +111,7 @@ TEST_CASE("Create account then decrease balance below minimum", TEST_CASE("Account below minimum balance increases but stays below minimum", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -130,7 +130,7 @@ TEST_CASE("Account below minimum balance increases but stays below minimum", TEST_CASE("Account below minimum balance decreases", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -250,7 +250,7 @@ generateBuyingLiabilities(Application& app, LedgerEntry offer, bool excess, TEST_CASE("Create account then increase liabilities without changing balance", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; VirtualClock clock; @@ -289,7 +289,7 @@ TEST_CASE("Create account then increase liabilities without changing balance", TEST_CASE("Invariant for liabilities", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; VirtualClock clock; diff --git a/src/invariant/test/OrderBookIsNotCrossedTests.cpp b/src/invariant/test/OrderBookIsNotCrossedTests.cpp index c10a6a5daf..7e3b1ab2c4 100644 --- a/src/invariant/test/OrderBookIsNotCrossedTests.cpp +++ b/src/invariant/test/OrderBookIsNotCrossedTests.cpp @@ -109,7 +109,7 @@ 
TEST_CASE("OrderBookIsNotCrossed in-memory order book is consistent with " "[invariant][OrderBookIsNotCrossed]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); // When testing the order book not crossed invariant, enable it and no other // invariants (these tests do things which violate other invariants). cfg.INVARIANT_CHECKS = {}; @@ -185,7 +185,7 @@ TEST_CASE("OrderBookIsNotCrossed properly throws if order book is crossed", { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); // When testing the order book not crossed invariant, enable it and no other // invariants (these tests do things which violate other invariants). cfg.INVARIANT_CHECKS = {}; diff --git a/src/invariant/test/SponsorshipCountIsValidTests.cpp b/src/invariant/test/SponsorshipCountIsValidTests.cpp index 9f35cd5292..91d75c805b 100644 --- a/src/invariant/test/SponsorshipCountIsValidTests.cpp +++ b/src/invariant/test/SponsorshipCountIsValidTests.cpp @@ -18,7 +18,7 @@ using namespace stellar::InvariantTestUtils; TEST_CASE("sponsorship invariant", "[invariant][sponsorshipcountisvalid]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"SponsorshipCountIsValid"}; auto app = createTestApplication(clock, cfg); diff --git a/src/ledger/InMemoryLedgerTxn.cpp b/src/ledger/InMemoryLedgerTxn.cpp index bcdaca07a2..4c7d47ae83 100644 --- a/src/ledger/InMemoryLedgerTxn.cpp +++ b/src/ledger/InMemoryLedgerTxn.cpp @@ -3,11 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "ledger/InMemoryLedgerTxn.h" -#include "crypto/SecretKey.h" +#include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnImpl.h" #include "transactions/TransactionUtils.h" #include "util/GlobalChecks.h" 
-#include "util/XDROperators.h" namespace stellar { @@ -73,8 +72,9 @@ InMemoryLedgerTxn::FilteredEntryIteratorImpl::clone() const } InMemoryLedgerTxn::InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, - Database& db) - : LedgerTxn(parent), mDb(db) + Database& db, + AbstractLedgerTxnParent* realRoot) + : LedgerTxn(parent), mDb(db), mRealRootForOffers(realRoot) { } @@ -141,6 +141,36 @@ InMemoryLedgerTxn::updateLedgerKeyMap(EntryIterator iter) { auto const& genKey = iter.key(); updateLedgerKeyMap(genKey, iter.entryExists()); + + // In addition to maintaining in-memory map, commit offers to "real" ltx + // root to test SQL backed offers + if (mRealRootForOffers && + genKey.type() == InternalLedgerEntryType::LEDGER_ENTRY) + { + auto const& ledgerKey = genKey.ledgerKey(); + if (ledgerKey.type() == OFFER) + { + LedgerTxn ltx(*mRealRootForOffers); + if (!iter.entryExists()) + { + ltx.erase(ledgerKey); + } + else + { + auto ltxe = ltx.load(genKey); + if (!ltxe) + { + ltx.create(iter.entry()); + } + else + { + ltxe.current() = iter.entry().ledgerEntry(); + } + } + + ltx.commit(); + } + } } } @@ -332,4 +362,82 @@ InMemoryLedgerTxn::getPoolShareTrustLinesByAccountAndAsset( return res; } +void +InMemoryLedgerTxn::dropOffers(bool rebuild) +{ + if (mRealRootForOffers) + { + mRealRootForOffers->dropOffers(rebuild); + } + else + { + LedgerTxn::dropOffers(rebuild); + } +} + +uint64_t +InMemoryLedgerTxn::countObjects(LedgerEntryType let) const +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->countObjects(let); + } + + return 0; +} + +uint64_t +InMemoryLedgerTxn::countObjects(LedgerEntryType let, + LedgerRange const& ledgers) const +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->countObjects(let, ledgers); + } + + return 0; +} + +void +InMemoryLedgerTxn::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const +{ + if (mRealRootForOffers) + { + mRealRootForOffers->deleteObjectsModifiedOnOrAfterLedger(ledger); + } +} + +UnorderedMap 
+InMemoryLedgerTxn::getAllOffers() +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->getAllOffers(); + } + + return LedgerTxn::getAllOffers(); +} + +std::shared_ptr +InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling) +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->getBestOffer(buying, selling); + } + + return LedgerTxn::getBestOffer(buying, selling); +} + +std::shared_ptr +InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling, + OfferDescriptor const& worseThan) +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->getBestOffer(buying, selling, worseThan); + } + + return LedgerTxn::getBestOffer(buying, selling, worseThan); +} } diff --git a/src/ledger/InMemoryLedgerTxn.h b/src/ledger/InMemoryLedgerTxn.h index 76cf56fcae..38917186cb 100644 --- a/src/ledger/InMemoryLedgerTxn.h +++ b/src/ledger/InMemoryLedgerTxn.h @@ -44,6 +44,12 @@ class InMemoryLedgerTxn : public LedgerTxn Database& mDb; std::unique_ptr mTransaction; + // For some tests, we need to bypass ledger close and commit directly to the + // in-memory ltx. However, we still want to test SQL backed offers. 
The + // "never" committing root sets this flag to true such that offer-related + // calls get based to the real SQL backed root + AbstractLedgerTxnParent* const mRealRootForOffers; + UnorderedMap> mOffersAndPoolShareTrustlineKeys; @@ -75,7 +81,8 @@ class InMemoryLedgerTxn : public LedgerTxn EntryIterator getFilteredEntryIterator(EntryIterator const& iter); public: - InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db); + InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db, + AbstractLedgerTxnParent* realRoot = nullptr); virtual ~InMemoryLedgerTxn(); void addChild(AbstractLedgerTxn& child, TransactionMode mode) override; @@ -100,6 +107,23 @@ class InMemoryLedgerTxn : public LedgerTxn UnorderedMap getPoolShareTrustLinesByAccountAndAsset(AccountID const& account, Asset const& asset) override; + + // These functions call into the real LedgerTxn root to test offer SQL + // related functionality + UnorderedMap getAllOffers() override; + std::shared_ptr + getBestOffer(Asset const& buying, Asset const& selling) override; + std::shared_ptr + getBestOffer(Asset const& buying, Asset const& selling, + OfferDescriptor const& worseThan) override; + + void dropOffers(bool rebuild) override; + + uint64_t countObjects(LedgerEntryType let) const override; + uint64_t countObjects(LedgerEntryType let, + LedgerRange const& ledgers) const override; + + void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; }; } diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index b562fe8c6f..6fa5019a0b 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -348,41 +348,35 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist, releaseAssert(latestLedgerHeader.has_value()); - // Step 3. 
Restore BucketList if we're doing a full core startup - // (startServices=true), OR when using BucketListDB - if (restoreBucketlist || mApp.getConfig().isUsingBucketListDB()) + HistoryArchiveState has = getLastClosedLedgerHAS(); + auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has); + auto pubmissing = + mApp.getHistoryManager().getMissingBucketsReferencedByPublishQueue(); + missing.insert(missing.end(), pubmissing.begin(), pubmissing.end()); + if (!missing.empty()) { - HistoryArchiveState has = getLastClosedLedgerHAS(); - auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has); - auto pubmissing = mApp.getHistoryManager() - .getMissingBucketsReferencedByPublishQueue(); - missing.insert(missing.end(), pubmissing.begin(), pubmissing.end()); - if (!missing.empty()) + CLOG_ERROR(Ledger, "{} buckets are missing from bucket directory '{}'", + missing.size(), mApp.getBucketManager().getBucketDir()); + throw std::runtime_error("Bucket directory is corrupt"); + } + + if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) + { + // Only restart merges in full startup mode. Many modes in core + // (standalone offline commands, in-memory setup) do not need to + // spin up expensive merge processes. + auto assumeStateWork = + mApp.getWorkScheduler().executeWork( + has, latestLedgerHeader->ledgerVersion, restoreBucketlist); + if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS) { - CLOG_ERROR(Ledger, - "{} buckets are missing from bucket directory '{}'", - missing.size(), mApp.getBucketManager().getBucketDir()); - throw std::runtime_error("Bucket directory is corrupt"); + CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}", + ledgerAbbrev(*latestLedgerHeader)); } - - if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) + else { - // Only restart merges in full startup mode. Many modes in core - // (standalone offline commands, in-memory setup) do not need to - // spin up expensive merge processes. 
- auto assumeStateWork = - mApp.getWorkScheduler().executeWork( - has, latestLedgerHeader->ledgerVersion, restoreBucketlist); - if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS) - { - CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}", - ledgerAbbrev(*latestLedgerHeader)); - } - else - { - // Work should only fail during graceful shutdown - releaseAssertOrThrow(mApp.isStopping()); - } + // Work should only fail during graceful shutdown + releaseAssertOrThrow(mApp.isStopping()); } } @@ -1003,14 +997,7 @@ LedgerManagerImpl::closeLedger(LedgerCloseData const& ledgerData) // member variable: if we throw while committing below, we will at worst // emit duplicate meta, when retrying. mNextMetaToEmit = std::move(ledgerCloseMeta); - - // If the LedgerCloseData provided an expected hash, then we validated - // it above. - if (!mApp.getConfig().EXPERIMENTAL_PRECAUTION_DELAY_META || - ledgerData.getExpectedHash()) - { - emitNextMeta(); - } + emitNextMeta(); } // The next 5 steps happen in a relatively non-obvious, subtle order. 
diff --git a/src/ledger/LedgerStateSnapshot.cpp b/src/ledger/LedgerStateSnapshot.cpp index 3455d51131..e04101a0fc 100644 --- a/src/ledger/LedgerStateSnapshot.cpp +++ b/src/ledger/LedgerStateSnapshot.cpp @@ -222,7 +222,11 @@ LedgerSnapshot::LedgerSnapshot(AbstractLedgerTxn& ltx) LedgerSnapshot::LedgerSnapshot(Application& app) { - if (app.getConfig().DEPRECATED_SQL_LEDGER_STATE) + if (app.getConfig().DEPRECATED_SQL_LEDGER_STATE +#ifdef BUILD_TESTS + || app.getConfig().MODE_USES_IN_MEMORY_LEDGER +#endif + ) { // Legacy read-only SQL transaction mLegacyLedgerTxn = std::make_unique( diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp index 14896d01e0..322c34c10c 100644 --- a/src/ledger/LedgerTxn.cpp +++ b/src/ledger/LedgerTxn.cpp @@ -3058,6 +3058,13 @@ uint32_t LedgerTxnRoot::Impl::prefetchInternal(UnorderedSet const& keys, LedgerKeyMeter* lkMeter) { +#ifdef BUILD_TESTS + if (mApp.getConfig().MODE_USES_IN_MEMORY_LEDGER) + { + return 0; + } +#endif + ZoneScoped; uint32_t total = 0; diff --git a/src/ledger/test/LedgerCloseMetaStreamTests.cpp b/src/ledger/test/LedgerCloseMetaStreamTests.cpp index 078e7d2d35..9b613555aa 100644 --- a/src/ledger/test/LedgerCloseMetaStreamTests.cpp +++ b/src/ledger/test/LedgerCloseMetaStreamTests.cpp @@ -39,11 +39,10 @@ using namespace stellar; TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", "[ledgerclosemetastreamlive]") { - // Live reqires a multinode simulation, as we're not allowed to run a + // Live requires a multinode simulation, as we're not allowed to run a // validator and record metadata streams at the same time (to avoid the // unbounded-latency stream-write step): N nodes participating in consensus, - // and two watching and streaming metadata -- the second one using - // EXPERIMENTAL_PRECAUTION_DELAY_META. 
+ // and one watching and streaming metadata Hash expectedLastUnsafeHash, expectedLastSafeHash; TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8))); @@ -71,12 +70,7 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", SIMULATION_CREATE_NODE(Node1); // Validator SIMULATION_CREATE_NODE(Node2); // Validator SIMULATION_CREATE_NODE(Node3); // Validator - - // Watcher, !EXPERIMENTAL_PRECAUTION_DELAY_META - SIMULATION_CREATE_NODE(Node4); - - // Watcher, EXPERIMENTAL_PRECAUTION_DELAY_META - SIMULATION_CREATE_NODE(Node5); + SIMULATION_CREATE_NODE(Node4); // Watcher SCPQuorumSet qSet; qSet.threshold = 3; @@ -88,44 +82,27 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", Config const& cfg2 = getTestConfig(2); Config const& cfg3 = getTestConfig(3); Config cfg4 = getTestConfig(4); - Config cfg5 = getTestConfig( - 5, - Config:: - TESTDB_IN_MEMORY_NO_OFFERS); // needed by - // EXPERIMENTAL_PRECAUTION_DELAY_META - - // Step 2: open writable files and pass them to configs 4 and 5 - // (watchers). + + // Step 2: open writable files and pass them to watcher config cfg4.NODE_IS_VALIDATOR = false; cfg4.FORCE_SCP = false; - cfg5.NODE_IS_VALIDATOR = false; - cfg5.FORCE_SCP = false; #ifdef _WIN32 cfg4.METADATA_OUTPUT_STREAM = metaPath; - cfg5.METADATA_OUTPUT_STREAM = metaPathSafe; #else int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644); REQUIRE(fd != -1); cfg4.METADATA_OUTPUT_STREAM = fmt::format(FMT_STRING("fd:{}"), fd); - int fdSafe = ::open(metaPathSafe.c_str(), O_CREAT | O_WRONLY, 0644); - REQUIRE(fdSafe != -1); - cfg5.METADATA_OUTPUT_STREAM = fmt::format(FMT_STRING("fd:{}"), fdSafe); #endif - cfg4.EXPERIMENTAL_PRECAUTION_DELAY_META = false; - cfg5.EXPERIMENTAL_PRECAUTION_DELAY_META = true; - // Step 3: Run simulation a few steps to stream metadata. 
auto app1 = simulation->addNode(vNode1SecretKey, qSet, &cfg1); auto app2 = simulation->addNode(vNode2SecretKey, qSet, &cfg2); auto app3 = simulation->addNode(vNode3SecretKey, qSet, &cfg3); auto app4 = simulation->addNode(vNode4SecretKey, qSet, &cfg4); - auto app5 = simulation->addNode(vNode5SecretKey, qSet, &cfg5); simulation->addPendingConnection(vNode1NodeID, vNode2NodeID); simulation->addPendingConnection(vNode1NodeID, vNode3NodeID); simulation->addPendingConnection(vNode1NodeID, vNode4NodeID); - simulation->addPendingConnection(vNode1NodeID, vNode5NodeID); simulation->startAllNodes(); bool watchersAreCorrupted = false; @@ -144,26 +121,21 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", auto const lastClosedLedger = app4->getLedgerManager().getLastClosedLedgerNum(); - REQUIRE(app5->getLedgerManager().getLastClosedLedgerNum() == - lastClosedLedger); if (lastClosedLedger == expectedLastWatcherLedger - 1) { - expectedLastSafeHash = app5->getLedgerManager() + expectedLastSafeHash = app4->getLedgerManager() .getLastClosedLedgerHeader() .hash; if (induceOneLedgerFork) { - for (auto& app : {app4, app5}) - { - txtest::closeLedgerOn( - *app, ledgerToCorrupt, - app->getLedgerManager() - .getLastClosedLedgerHeader() - .header.scpValue.closeTime + - 1); - } + txtest::closeLedgerOn( + *app4, ledgerToCorrupt, + app4->getLedgerManager() + .getLastClosedLedgerHeader() + .header.scpValue.closeTime + + 1); expectedLastUnsafeHash = app4->getLedgerManager() @@ -181,8 +153,6 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", REQUIRE(app4->getLedgerManager().getLastClosedLedgerNum() == expectedLastWatcherLedger); - REQUIRE(app5->getLedgerManager().getLastClosedLedgerNum() == - expectedLastWatcherLedger); if (!induceOneLedgerFork) { @@ -206,7 +176,6 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", }; auto lcms = readLcms(metaPath); - auto lcmsSafe = readLcms(metaPathSafe); // The "- 1" is because we don't stream meta for the genesis 
ledger. REQUIRE(lcms.size() == expectedLastWatcherLedger - 1); if (lcms.back().v() == 0) @@ -221,188 +190,6 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", { REQUIRE(false); } - - // The node with EXPERIMENTAL_PRECAUTION_DELAY_META should not have streamed - // the meta for the latest ledger (or the latest ledger before the corrupt - // one) yet. - REQUIRE(lcmsSafe.size() == lcms.size() - 1); - - if (lcmsSafe.back().v() == 0) - { - REQUIRE(lcmsSafe.back().v0().ledgerHeader.hash == expectedLastSafeHash); - } - else if (lcmsSafe.back().v() == 1) - { - REQUIRE(lcmsSafe.back().v1().ledgerHeader.hash == expectedLastSafeHash); - } - REQUIRE(lcmsSafe == - std::vector(lcms.begin(), lcms.end() - 1)); -} - -TEST_CASE("LedgerCloseMetaStream file descriptor - REPLAY_IN_MEMORY", - "[ledgerclosemetastreamreplay]") -{ - // Step 1: generate some history for replay. - using namespace stellar::historytestutils; - TmpDirHistoryConfigurator tCfg; - { - Config genCfg = getTestConfig(0, Config::TESTDB_DEFAULT); - genCfg.MANUAL_CLOSE = false; - VirtualClock genClock; - genCfg = tCfg.configure(genCfg, true); - auto genApp = createTestApplication(genClock, genCfg); - auto& genHam = genApp->getHistoryArchiveManager(); - genHam.initializeHistoryArchive(tCfg.getArchiveDirName()); - for (size_t i = 0; i < 100; ++i) - { - genClock.crank(false); - } - auto& genHm = genApp->getHistoryManager(); - while (genHm.getPublishSuccessCount() < 5) - { - genClock.crank(true); - } - while (genClock.cancelAllEvents() || - genApp->getProcessManager().getNumRunningProcesses() > 0) - { - genClock.crank(false); - } - } - - // Step 2: open a writable file descriptor. 
- TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8))); - TmpDir td = tdm.tmpDir("streams"); - std::string metaPath = td.getName() + "/stream.xdr"; - auto cfg1 = getTestConfig(1); -#ifdef _WIN32 - cfg1.METADATA_OUTPUT_STREAM = metaPath; -#else - int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644); - REQUIRE(fd != -1); - cfg1.METADATA_OUTPUT_STREAM = fmt::format(FMT_STRING("fd:{}"), fd); -#endif - - bool const delayMeta = GENERATE(true, false); - - // Step 3: pass it to an application and have it catch up to the generated - // history, streaming ledgerCloseMeta to the file descriptor. - Hash hash; - { - auto cfg = tCfg.configure(cfg1, false); - cfg.NODE_IS_VALIDATOR = false; - cfg.FORCE_SCP = false; - cfg.RUN_STANDALONE = true; - cfg.setInMemoryMode(); - cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta; - SECTION("skip mode") - { - cfg.MODE_STORES_HISTORY_MISC = true; - cfg.CATCHUP_SKIP_KNOWN_RESULTS_FOR_TESTING = true; - } - VirtualClock clock; - auto app = createTestApplication(clock, cfg, /*newdb=*/false); - - CatchupConfiguration cc{CatchupConfiguration::CURRENT, - std::numeric_limits::max(), - CatchupConfiguration::Mode::OFFLINE_COMPLETE}; - Json::Value catchupInfo; - auto& ham = app->getHistoryArchiveManager(); - auto& lm = app->getLedgerManager(); - auto archive = ham.selectRandomReadableHistoryArchive(); - int res = catchup(app, cc, catchupInfo, archive); - REQUIRE(res == 0); - hash = lm.getLastClosedLedgerHeader().hash; - while (clock.cancelAllEvents() || - app->getProcessManager().getNumRunningProcesses() > 0) - { - clock.crank(false); - } - } - - // Step 4: reopen the file as an XDR stream and read back the LCMs - // and check they have the expected content. - // - // The EXPERIMENTAL_PRECAUTION_DELAY_META case should still have streamed - // the latest meta, because catchup should have validated that ledger's hash - // by validating a chain of hashes back from one obtained from consensus. 
- XDRInputFileStream stream; - stream.open(metaPath); - LedgerCloseMeta lcm; - size_t nLcm = 1; - while (stream && stream.readOne(lcm)) - { - ++nLcm; - } - // 5 checkpoints is ledger 0x13f - REQUIRE(nLcm == 0x13f); - if (lcm.v() == 0) - { - REQUIRE(lcm.v0().ledgerHeader.hash == hash); - } - else if (lcm.v() == 1) - { - REQUIRE(lcm.v1().ledgerHeader.hash == hash); - } - else - { - REQUIRE(false); - } -} - -TEST_CASE("EXPERIMENTAL_PRECAUTION_DELAY_META configuration", - "[ledgerclosemetastreamlive][ledgerclosemetastreamreplay]") -{ - VirtualClock clock; - Config cfg = getTestConfig(); - - SECTION("EXPERIMENTAL_PRECAUTION_DELAY_META may take either value " - "(which is ignored) without METADATA_OUTPUT_STREAM") - { - cfg.METADATA_OUTPUT_STREAM = ""; - auto const delayMeta = GENERATE(false, true); - auto const inMemory = GENERATE(false, true); - cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta; - if (inMemory) - { - cfg.setInMemoryMode(); - } - REQUIRE_NOTHROW(createTestApplication(clock, cfg)); - } - - SECTION("EXPERIMENTAL_PRECAUTION_DELAY_META together with " - "METADATA_OUTPUT_STREAM requires --in-memory") - { - TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8))); - TmpDir td = tdm.tmpDir("streams"); - std::string metaPath = td.getName() + "/stream.xdr"; - std::string metaStream; - -#ifdef _WIN32 - metaStream = metaPath; -#else - int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644); - REQUIRE(fd != -1); - metaStream = fmt::format(FMT_STRING("fd:{}"), fd); -#endif - - cfg.METADATA_OUTPUT_STREAM = metaStream; - auto const delayMeta = GENERATE(false, true); - auto const inMemory = GENERATE(false, true); - cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta; - if (inMemory) - { - cfg.setInMemoryMode(); - } - if (delayMeta && !inMemory) - { - REQUIRE_THROWS_AS(createTestApplication(clock, cfg), - std::invalid_argument); - } - else - { - REQUIRE_NOTHROW(createTestApplication(clock, cfg)); - } - } } TEST_CASE("METADATA_DEBUG_LEDGERS works", 
"[metadebug]") diff --git a/src/ledger/test/LedgerTxnTests.cpp b/src/ledger/test/LedgerTxnTests.cpp index 10200eea2d..11f0a2c9fd 100644 --- a/src/ledger/test/LedgerTxnTests.cpp +++ b/src/ledger/test/LedgerTxnTests.cpp @@ -339,13 +339,18 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") std::bernoulli_distribution shouldCommitDist; auto generateNew = [](AbstractLedgerTxn& ltx, - UnorderedMap& entries) { + UnorderedMap& entries, + bool offerOnly) { size_t const NEW_ENTRIES = 100; UnorderedMap newBatch; while (newBatch.size() < NEW_ENTRIES) { - auto le = LedgerTestUtils::generateValidLedgerEntryWithExclusions( - {CONFIG_SETTING}); + auto le = + offerOnly + ? LedgerTestUtils::generateValidLedgerEntryOfType(OFFER) + : LedgerTestUtils::generateValidLedgerEntryWithExclusions( + {CONFIG_SETTING}); + auto key = LedgerEntryKey(le); if (entries.find(LedgerEntryKey(le)) == entries.end()) { @@ -428,7 +433,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") } }; - auto runTest = [&](AbstractLedgerTxnParent& ltxParent) { + auto runTest = [&](AbstractLedgerTxnParent& ltxParent, bool offerOnly) { UnorderedMap entries; UnorderedSet dead; size_t const NUM_BATCHES = 10; @@ -439,7 +444,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") UnorderedMap updatedEntries = entries; UnorderedSet updatedDead = dead; LedgerTxn ltx1(ltxParent); - generateNew(ltx1, updatedEntries); + generateNew(ltx1, updatedEntries, offerOnly); generateModify(ltx1, updatedEntries); generateErase(ltx1, updatedEntries, updatedDead); @@ -459,7 +464,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") auto app = createTestApplication(clock, getTestConfig(0, mode)); LedgerTxn ltx1(app->getLedgerTxnRoot()); - runTest(ltx1); + runTest(ltx1, false); } SECTION("round trip to LedgerTxnRoot") @@ -468,13 +473,9 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") { VirtualClock clock; // BucketListDB incompatible with direct root commits - auto app = createTestApplication( - clock, - getTestConfig(0, mode == 
Config::TESTDB_DEFAULT - ? Config::TESTDB_IN_MEMORY_NO_OFFERS - : mode)); + auto app = createTestApplication(clock, getTestConfig(0, mode)); - runTest(app->getLedgerTxnRoot()); + runTest(app->getLedgerTxnRoot(), true); } SECTION("with no cache") @@ -482,31 +483,23 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") VirtualClock clock; // BucketListDB incompatible with direct root commits - auto cfg = - getTestConfig(0, mode == Config::TESTDB_DEFAULT - ? Config::TESTDB_IN_MEMORY_NO_OFFERS - : mode); + auto cfg = getTestConfig(0, mode); cfg.ENTRY_CACHE_SIZE = 0; auto app = createTestApplication(clock, cfg); - runTest(app->getLedgerTxnRoot()); + runTest(app->getLedgerTxnRoot(), true); } } }; - SECTION("default") - { - runTestWithDbMode(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTestWithDbMode(Config::TESTDB_ON_DISK_SQLITE); + runTestWithDbMode(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTestWithDbMode(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTestWithDbMode(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -713,19 +706,14 @@ TEST_CASE("LedgerTxn createWithoutLoading and updateWithoutLoading", } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -813,19 +801,14 @@ TEST_CASE("LedgerTxn erase", "[ledgertxn]") validate(ltx3, {}); } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -918,19 +901,14 @@ TEST_CASE("LedgerTxn eraseWithoutLoading", 
"[ledgertxn]") } }; - SECTION("default") - { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -1035,7 +1013,7 @@ testInflationWinners( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); testAtRoot(*app); } @@ -1044,7 +1022,7 @@ testInflationWinners( if (updates.size() > 1) { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.ENTRY_CACHE_SIZE = 0; auto app = createTestApplication(clock, cfg); @@ -1055,7 +1033,7 @@ testInflationWinners( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); testInflationWinners(app->getLedgerTxnRoot(), maxWinners, minBalance, expected, updates.cbegin(), updates.cend()); @@ -1384,19 +1362,14 @@ TEST_CASE("LedgerTxn loadHeader", "[ledgertxn]") } }; - SECTION("default") - { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -1494,103 +1467,16 @@ TEST_CASE_VERSIONS("LedgerTxn load", "[ledgertxn]") } }); } - - SECTION("load tests for all versions") - { - for_all_versions(*app, [&]() { - SECTION("invalid keys") - { - LedgerTxn ltx1(app->getLedgerTxnRoot()); - - auto acc = txtest::getAccount("acc"); - auto acc2 = txtest::getAccount("acc2"); - - { - auto native = txtest::makeNativeAsset(); - 
UNSCOPED_INFO("native asset on trustline key"); - - // Invariant not supported in BucketListDB and in-memory - // mode - if (mode != Config::TESTDB_DEFAULT && - mode != Config::TESTDB_IN_MEMORY_NO_OFFERS) - { - REQUIRE_THROWS_AS(ltx1.load(trustlineKey( - acc.getPublicKey(), native)), - NonSociRelatedException); - } - } - - { - auto usd = txtest::makeAsset(acc, "usd"); - UNSCOPED_INFO("issuer on trustline key"); - - // Invariant not supported in BucketListDB and in-memory - // mode - if (mode != Config::TESTDB_DEFAULT && - mode != Config::TESTDB_IN_MEMORY_NO_OFFERS) - { - REQUIRE_THROWS_AS(ltx1.load(trustlineKey( - acc.getPublicKey(), usd)), - NonSociRelatedException); - } - } - - { - std::string accountIDStr, issuerStr, assetCodeStr; - auto invalidAssets = testutil::getInvalidAssets(acc); - for (auto const& asset : invalidAssets) - { - auto key = trustlineKey(acc2.getPublicKey(), asset); - - // Invariant not supported in BucketListDB and - // in-memory mode - if (mode != Config::TESTDB_DEFAULT && - mode != Config::TESTDB_IN_MEMORY_NO_OFFERS) - { - REQUIRE_THROWS_AS(ltx1.load(key), - NonSociRelatedException); - } - } - } - - SECTION("load generated keys") - { - for (int i = 0; i < 1000; ++i) - { - LedgerKey lk = autocheck::generator()(5); - - try - { - ltx1.load(lk); - } - catch (NonSociRelatedException&) - { - // this is fine - } - catch (std::exception&) - { - REQUIRE(false); - } - } - } - } - }); - } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -1933,19 +1819,14 @@ TEST_CASE("LedgerTxn loadAllOffers", "[ledgertxn]") } }; - SECTION("default") - { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + 
runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -2334,14 +2215,19 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]") loadAccount(ltx2, account.accountID); } - // Note that we can't prefetch for more than 1000 offers - double expectedPrefetchHitRate = - std::min(numOffers - offerID, - static_cast(getMaxOffersToCross())) / - static_cast(accounts.size()); - REQUIRE(fabs(expectedPrefetchHitRate - - ltx2.getPrefetchHitRate()) < .000001); - REQUIRE(preLoadPrefetchHitRate < ltx2.getPrefetchHitRate()); + // Prefetch doesn't work in in-memory mode, but this is for + // testing only so we only care about accuracy + if (mode != Config::TESTDB_IN_MEMORY) + { + // Note that we can't prefetch for more than 1000 offers + double expectedPrefetchHitRate = + std::min(numOffers - offerID, + static_cast(getMaxOffersToCross())) / + static_cast(accounts.size()); + REQUIRE(fabs(expectedPrefetchHitRate - + ltx2.getPrefetchHitRate()) < .000001); + REQUIRE(preLoadPrefetchHitRate < ltx2.getPrefetchHitRate()); + } }; SECTION("prefetch for all worse remaining offers") @@ -2362,14 +2248,16 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]") } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } - SECTION("sqlite") + // This mode is only used in testing, but we should still make sure it works + // for other tests that leverage it + SECTION("in-memory") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -2738,7 +2626,7 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]") e.lastModifiedLedgerSeq = 1; entrySet.emplace(e); } - if (cfg.isUsingBucketListDB()) + if (!cfg.MODE_USES_IN_MEMORY_LEDGER) { std::vector ledgerVect{entrySet.begin(), entrySet.end()}; @@ -2789,14 +2677,9 @@ TEST_CASE("LedgerTxnRoot prefetch 
classic entries", "[ledgertxn]") } }; - SECTION("default") - { - runTest(getTestConfig()); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTest(getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE)); + runTest(getTestConfig(Config::TESTDB_BUCKET_DB_PERSISTENT)); } #ifdef USE_POSTGRES @@ -2821,7 +2704,9 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]") { // First add some bulking entries so we're not using a // totally empty database. - entries = LedgerTestUtils::generateValidLedgerEntries(n); + entries = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {OFFER}, n); LedgerTxn ltx(app->getLedgerTxnRoot()); for (auto e : entries) { @@ -2831,7 +2716,8 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]") } // Then do some precise timed creates. - entries = LedgerTestUtils::generateValidLedgerEntries(n); + entries = LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {OFFER}, n); auto& m = app->getMetrics().NewMeter({"ledger", "create", "commit"}, "entry"); while (!entries.empty()) @@ -2858,8 +2744,8 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]") SECTION("sqlite") { - runTest(Config::TESTDB_ON_DISK_SQLITE, true); - runTest(Config::TESTDB_ON_DISK_SQLITE, false); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, true); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, false); } #ifdef USE_POSTGRES @@ -2885,7 +2771,9 @@ TEST_CASE("Erase performance benchmark", "[!hide][erasebench]") { // First add some bulking entries so we're not using a // totally empty database. 
- entries = LedgerTestUtils::generateValidLedgerEntries(n); + entries = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {OFFER}, n); LedgerTxn ltx(app->getLedgerTxnRoot()); for (auto e : entries) { @@ -2921,8 +2809,8 @@ TEST_CASE("Erase performance benchmark", "[!hide][erasebench]") SECTION("sqlite") { - runTest(Config::TESTDB_ON_DISK_SQLITE, true); - runTest(Config::TESTDB_ON_DISK_SQLITE, false); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, true); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, false); } #ifdef USE_POSTGRES @@ -2941,7 +2829,6 @@ TEST_CASE("LedgerTxnRoot prefetch soroban entries", "[ledgertxn]") // Test setup. VirtualClock clock; - cfg.DEPRECATED_SQL_LEDGER_STATE = false; Application::pointer app = createTestApplication(clock, cfg); UnorderedSet keysToPrefetch; auto& root = app->getLedgerTxnRoot(); @@ -3148,219 +3035,6 @@ TEST_CASE("LedgerKeyMeter tests") REQUIRE(lkMeter.canLoad(ttlKey, std::numeric_limits::max())); } -TEST_CASE("Bulk load batch size benchmark", "[!hide][bulkbatchsizebench]") -{ - size_t floor = 1000; - size_t ceiling = 20000; - size_t bestBatchSize = 0; - double bestTime = 0xffffffff; - - auto runTest = [&](Config::TestDbMode mode) { - for (; floor <= ceiling; floor += 1000) - { - UnorderedSet keys; - VirtualClock clock; - Config cfg(getTestConfig(0, mode)); - cfg.PREFETCH_BATCH_SIZE = floor; - - auto app = createTestApplication(clock, cfg); - - auto& root = app->getLedgerTxnRoot(); - - auto entries = LedgerTestUtils::generateValidLedgerEntries(50000); - LedgerTxn ltx(root); - for (auto e : entries) - { - ltx.createWithoutLoading(e); - keys.insert(LedgerEntryKey(e)); - } - ltx.commit(); - - auto& m = app->getMetrics().NewTimer( - {"ledger", "bulk-load", std::to_string(floor) + " batch"}); - LedgerTxn ltx2(root); - { - m.TimeScope(); - root.prefetchClassic(keys); - } - ltx2.commit(); - - auto total = m.sum(); - CLOG_INFO(Ledger, "Bulk Load test batch size: {} took {}", floor, - total); - - if (total < bestTime) - { 
- bestBatchSize = floor; - bestTime = total; - } - } - CLOG_INFO(Ledger, "Best batch and best time per entry {} : {}", - bestBatchSize, bestTime); - }; - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); - } - -#ifdef USE_POSTGRES - SECTION("postgresql") - { - runTest(Config::TESTDB_POSTGRESQL); - } -#endif -} - -TEST_CASE("Signers performance benchmark", "[!hide][signersbench]") -{ - auto getTimeScope = [](Application& app, uint32_t numSigners, - std::string const& phase) { - std::string benchmarkStr = "benchmark-" + std::to_string(numSigners); - return app.getMetrics() - .NewTimer({"signers", benchmarkStr, phase}) - .TimeScope(); - }; - - auto getTimeSpent = [](Application& app, uint32_t numSigners, - std::string const& phase) { - std::string benchmarkStr = "benchmark-" + std::to_string(numSigners); - auto time = - app.getMetrics().NewTimer({"signers", benchmarkStr, phase}).sum(); - return phase + ": " + std::to_string(time) + " ms"; - }; - - auto generateEntries = [](size_t numAccounts, uint32_t numSigners) { - std::vector accounts; - accounts.reserve(numAccounts); - for (size_t i = 0; i < numAccounts; ++i) - { - LedgerEntry le; - le.data.type(ACCOUNT); - le.lastModifiedLedgerSeq = 2; - le.data.account() = LedgerTestUtils::generateValidAccountEntry(); - - auto& signers = le.data.account().signers; - if (signers.size() > numSigners) - { - signers.resize(numSigners); - } - else if (signers.size() < numSigners) - { - signers.reserve(numSigners); - std::generate_n(std::back_inserter(signers), - numSigners - signers.size(), - std::bind(autocheck::generator(), 5)); - std::sort(signers.begin(), signers.end(), - [](Signer const& lhs, Signer const& rhs) { - return lhs.key < rhs.key; - }); - } - - accounts.emplace_back(le); - } - return accounts; - }; - - auto generateKeys = [](std::vector const& accounts) { - std::vector keys; - keys.reserve(accounts.size()); - std::transform( - accounts.begin(), accounts.end(), std::back_inserter(keys), - [](LedgerEntry 
const& le) { return LedgerEntryKey(le); }); - return keys; - }; - - auto writeEntries = - [&getTimeScope](Application& app, uint32_t numSigners, - std::vector const& accounts) { - CLOG_WARNING(Ledger, "Creating accounts"); - LedgerTxn ltx(app.getLedgerTxnRoot()); - { - auto timer = getTimeScope(app, numSigners, "create"); - for (auto const& le : accounts) - { - ltx.create(le); - } - } - - CLOG_WARNING(Ledger, "Writing accounts"); - { - auto timer = getTimeScope(app, numSigners, "write"); - ltx.commit(); - } - }; - - auto readEntriesAndUpdateLastModified = - [&getTimeScope](Application& app, uint32_t numSigners, - std::vector const& accounts) { - CLOG_WARNING(Ledger, "Reading accounts"); - LedgerTxn ltx(app.getLedgerTxnRoot()); - { - auto timer = getTimeScope(app, numSigners, "read"); - for (auto const& key : accounts) - { - ++ltx.load(key).current().lastModifiedLedgerSeq; - } - } - - CLOG_WARNING(Ledger, "Writing accounts with unchanged signers"); - { - auto timer = getTimeScope(app, numSigners, "rewrite"); - ltx.commit(); - } - }; - - auto runTest = [&](Config::TestDbMode mode, size_t numAccounts, - uint32_t numSigners) { - VirtualClock clock; - Config cfg(getTestConfig(0, mode)); - cfg.ENTRY_CACHE_SIZE = 0; - Application::pointer app = createTestApplication(clock, cfg); - - CLOG_WARNING(Ledger, "Generating {} accounts with {} signers each", - numAccounts, numSigners); - auto accounts = generateEntries(numAccounts, numSigners); - auto keys = generateKeys(accounts); - - writeEntries(*app, numSigners, accounts); - readEntriesAndUpdateLastModified(*app, numSigners, keys); - - CLOG_WARNING(Ledger, "Done ({}, {}, {}, {})", - getTimeSpent(*app, numSigners, "create"), - getTimeSpent(*app, numSigners, "write"), - getTimeSpent(*app, numSigners, "read"), - getTimeSpent(*app, numSigners, "rewrite")); - }; - - auto runTests = [&](Config::TestDbMode mode) { - SECTION("0 signers") - { - runTest(mode, 100000, 0); - } - SECTION("10 signers") - { - runTest(mode, 100000, 10); - } - 
SECTION("20 signers") - { - runTest(mode, 100000, 20); - } - }; - - SECTION("sqlite") - { - runTests(Config::TESTDB_ON_DISK_SQLITE); - } - -#ifdef USE_POSTGRES - SECTION("postgresql") - { - runTests(Config::TESTDB_POSTGRESQL); - } -#endif -} - TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]") { auto getTimeScope = [](Application& app, std::string const& phase) { @@ -3530,7 +3204,7 @@ TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]") SECTION("sqlite") { - runTest(Config::TESTDB_ON_DISK_SQLITE, 10, 5, 25000); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, 10, 5, 25000); } } @@ -3936,14 +3610,16 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]") } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } - SECTION("sqlite") + // This mode is just used for testing, but we should still make sure it + // works + SECTION("in-memory") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -3954,7 +3630,7 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]") #endif } -TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]") +TEST_CASE("Access deactivated entry", "[ledgertxn]") { auto runTest = [&](Config::TestDbMode mode) { VirtualClock clock; @@ -3964,47 +3640,6 @@ TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]") le1.data.type(OFFER); le1.data.offer() = LedgerTestUtils::generateValidOfferEntry(); - LedgerKey lk1 = LedgerEntryKey(le1); - auto lk2 = lk1; - lk2.offer().sellerID = - LedgerTestUtils::generateValidOfferEntry().sellerID; - - { - LedgerTxn ltx(app->getLedgerTxnRoot()); - ltx.create(le1); - ltx.commit(); - } - - for_all_versions(*app, [&]() { - app->getLedgerTxnRoot().prefetchClassic({lk1, lk2}); - LedgerTxn ltx(app->getLedgerTxnRoot()); - REQUIRE(ltx.load(lk1)); - }); - }; - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); - } - -#ifdef 
USE_POSTGRES - SECTION("postgresql") - { - runTest(Config::TESTDB_POSTGRESQL); - } -#endif -} - -TEST_CASE("Access deactivated entry", "[ledgertxn]") -{ - auto runTest = [&](Config::TestDbMode mode) { - VirtualClock clock; - auto app = createTestApplication(clock, getTestConfig(0, mode)); - - LedgerEntry le1; - le1.data.type(DATA); - le1.data.data() = LedgerTestUtils::generateValidDataEntry(); - LedgerKey lk1 = LedgerEntryKey(le1); { @@ -4120,14 +3755,14 @@ TEST_CASE("Access deactivated entry", "[ledgertxn]") } }; - SECTION("sqlite") + SECTION("bucketlist") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -4183,7 +3818,7 @@ TEST_CASE("LedgerTxn generalized ledger entries", "[ledgertxn]") TEST_CASE("LedgerTxn best offers cache eviction", "[ledgertxn]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); auto buying = autocheck::generator()(UINT32_MAX); @@ -4400,7 +4035,7 @@ testPoolShareTrustLinesByAccountAndAsset( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); for_versions_from(18, *app, [&] { testAtRoot(*app); }); } @@ -4409,7 +4044,7 @@ testPoolShareTrustLinesByAccountAndAsset( if (updates.size() > 1) { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.ENTRY_CACHE_SIZE = 0; auto app = createTestApplication(clock, cfg); @@ -4420,7 +4055,7 @@ testPoolShareTrustLinesByAccountAndAsset( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, 
Config::TESTDB_IN_MEMORY)); for_versions_from(18, *app, [&] { testPoolShareTrustLinesByAccountAndAsset( @@ -4448,7 +4083,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); LedgerTxn ltx1(app->getLedgerTxnRoot()); LedgerTxn ltx2(ltx1); @@ -4461,7 +4096,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); LedgerTxn ltx1(app->getLedgerTxnRoot()); ltx1.getDelta(); @@ -4532,7 +4167,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset", TEST_CASE("InMemoryLedgerTxn simulate buckets", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); @@ -4574,7 +4209,7 @@ TEST_CASE("InMemoryLedgerTxn simulate buckets", "[ledgertxn]") TEST_CASE("InMemoryLedgerTxn getOffersByAccountAndAsset", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); @@ -4618,7 +4253,7 @@ TEST_CASE("InMemoryLedgerTxn getPoolShareTrustLinesByAccountAndAsset", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); @@ -4667,7 +4302,7 @@ TEST_CASE_VERSIONS("InMemoryLedgerTxn close multiple ledgers with merges", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = 
getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); @@ -4691,7 +4326,7 @@ TEST_CASE_VERSIONS("InMemoryLedgerTxn close multiple ledgers with merges", TEST_CASE("InMemoryLedgerTxn filtering", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); auto root = TestAccount::createRoot(*app); diff --git a/src/main/Application.h b/src/main/Application.h index 0e5bac078f..ae23517a57 100644 --- a/src/main/Application.h +++ b/src/main/Application.h @@ -327,11 +327,6 @@ class Application return ret; } - // This method is used in in-memory mode: when rebuilding state from buckets - // is not possible, this method resets the database state back to genesis - // (while preserving the overlay data). - virtual void resetDBForInMemoryMode() = 0; - virtual AppConnector& getAppConnector() = 0; protected: diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp index 6d9c32f613..d5f8f7208b 100644 --- a/src/main/ApplicationImpl.cpp +++ b/src/main/ApplicationImpl.cpp @@ -190,105 +190,112 @@ maybeRebuildLedger(Application& app, bool applyBuckets) std::set toRebuild; auto& ps = app.getPersistentState(); auto bucketListDBEnabled = app.getConfig().isUsingBucketListDB(); - for (auto let : xdr::xdr_traits::enum_values()) + +#ifdef BUILD_TESTS + if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER) { - // If BucketListDB is enabled, drop all tables except for offers - LedgerEntryType t = static_cast(let); - if (let != OFFER && bucketListDBEnabled) + // in-memory mode must always rebuild SQL table + toRebuild.emplace(OFFER); + } + else +#endif + { + for (auto let : xdr::xdr_traits::enum_values()) { - toDrop.emplace(t); - continue; - } + // If BucketListDB is enabled, drop all tables except for offers + LedgerEntryType t = static_cast(let); + if (let != OFFER && bucketListDBEnabled) + { + 
toDrop.emplace(t); + continue; + } - if (ps.shouldRebuildForType(t)) - { - toRebuild.emplace(t); + if (ps.shouldRebuildForType(t)) + { + toRebuild.emplace(t); + } } } - if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER) - { - app.getDatabase().clearPreparedStatementCache(); - soci::transaction tx(app.getDatabase().getSession()); + app.getDatabase().clearPreparedStatementCache(); + soci::transaction tx(app.getDatabase().getSession()); - auto loopEntries = [&](auto const& entryTypeSet, bool shouldRebuild) { - for (auto let : entryTypeSet) + auto loopEntries = [&](auto const& entryTypeSet, bool shouldRebuild) { + for (auto let : entryTypeSet) + { + switch (let) { - switch (let) - { - case ACCOUNT: - LOG_INFO(DEFAULT_LOG, "Dropping accounts"); - app.getLedgerTxnRoot().dropAccounts(shouldRebuild); - break; - case TRUSTLINE: - LOG_INFO(DEFAULT_LOG, "Dropping trustlines"); - app.getLedgerTxnRoot().dropTrustLines(shouldRebuild); - break; - case OFFER: - LOG_INFO(DEFAULT_LOG, "Dropping offers"); - app.getLedgerTxnRoot().dropOffers(shouldRebuild); - break; - case DATA: - LOG_INFO(DEFAULT_LOG, "Dropping accountdata"); - app.getLedgerTxnRoot().dropData(shouldRebuild); - break; - case CLAIMABLE_BALANCE: - LOG_INFO(DEFAULT_LOG, "Dropping claimablebalances"); - app.getLedgerTxnRoot().dropClaimableBalances(shouldRebuild); - break; - case LIQUIDITY_POOL: - LOG_INFO(DEFAULT_LOG, "Dropping liquiditypools"); - app.getLedgerTxnRoot().dropLiquidityPools(shouldRebuild); - break; - case CONTRACT_DATA: - LOG_INFO(DEFAULT_LOG, "Dropping contractdata"); - app.getLedgerTxnRoot().dropContractData(shouldRebuild); - break; - case CONTRACT_CODE: - LOG_INFO(DEFAULT_LOG, "Dropping contractcode"); - app.getLedgerTxnRoot().dropContractCode(shouldRebuild); - break; - case CONFIG_SETTING: - LOG_INFO(DEFAULT_LOG, "Dropping configsettings"); - app.getLedgerTxnRoot().dropConfigSettings(shouldRebuild); - break; - case TTL: - LOG_INFO(DEFAULT_LOG, "Dropping ttl"); - 
app.getLedgerTxnRoot().dropTTL(shouldRebuild); - break; - default: - abort(); - } + case ACCOUNT: + LOG_INFO(DEFAULT_LOG, "Dropping accounts"); + app.getLedgerTxnRoot().dropAccounts(shouldRebuild); + break; + case TRUSTLINE: + LOG_INFO(DEFAULT_LOG, "Dropping trustlines"); + app.getLedgerTxnRoot().dropTrustLines(shouldRebuild); + break; + case OFFER: + LOG_INFO(DEFAULT_LOG, "Dropping offers"); + app.getLedgerTxnRoot().dropOffers(shouldRebuild); + break; + case DATA: + LOG_INFO(DEFAULT_LOG, "Dropping accountdata"); + app.getLedgerTxnRoot().dropData(shouldRebuild); + break; + case CLAIMABLE_BALANCE: + LOG_INFO(DEFAULT_LOG, "Dropping claimablebalances"); + app.getLedgerTxnRoot().dropClaimableBalances(shouldRebuild); + break; + case LIQUIDITY_POOL: + LOG_INFO(DEFAULT_LOG, "Dropping liquiditypools"); + app.getLedgerTxnRoot().dropLiquidityPools(shouldRebuild); + break; + case CONTRACT_DATA: + LOG_INFO(DEFAULT_LOG, "Dropping contractdata"); + app.getLedgerTxnRoot().dropContractData(shouldRebuild); + break; + case CONTRACT_CODE: + LOG_INFO(DEFAULT_LOG, "Dropping contractcode"); + app.getLedgerTxnRoot().dropContractCode(shouldRebuild); + break; + case CONFIG_SETTING: + LOG_INFO(DEFAULT_LOG, "Dropping configsettings"); + app.getLedgerTxnRoot().dropConfigSettings(shouldRebuild); + break; + case TTL: + LOG_INFO(DEFAULT_LOG, "Dropping ttl"); + app.getLedgerTxnRoot().dropTTL(shouldRebuild); + break; + default: + abort(); } - }; + } + }; - loopEntries(toRebuild, true); - loopEntries(toDrop, false); - tx.commit(); + loopEntries(toRebuild, true); + loopEntries(toDrop, false); + tx.commit(); - // Nothing to apply, exit early - if (toRebuild.empty()) - { - return; - } + // Nothing to apply, exit early + if (toRebuild.empty()) + { + return; + } - // No transaction is needed. ApplyBucketsWork breaks the apply into many - // small chunks, each of which has its own transaction. 
If it fails at - // some point in the middle, then rebuildledger will not be cleared so - // this will run again on next start up. - if (applyBuckets) + // No transaction is needed. ApplyBucketsWork breaks the apply into many + // small chunks, each of which has its own transaction. If it fails at + // some point in the middle, then rebuildledger will not be cleared so + // this will run again on next start up. + if (applyBuckets) + { + LOG_INFO(DEFAULT_LOG, "Rebuilding ledger tables by applying buckets"); + auto filter = [&toRebuild](LedgerEntryType t) { + return toRebuild.find(t) != toRebuild.end(); + }; + if (!applyBucketsForLCL(app, filter)) { - LOG_INFO(DEFAULT_LOG, - "Rebuilding ledger tables by applying buckets"); - auto filter = [&toRebuild](LedgerEntryType t) { - return toRebuild.find(t) != toRebuild.end(); - }; - if (!applyBucketsForLCL(app, filter)) - { - throw std::runtime_error("Could not rebuild ledger tables"); - } - LOG_INFO(DEFAULT_LOG, "Successfully rebuilt ledger tables"); + throw std::runtime_error("Could not rebuild ledger tables"); } + LOG_INFO(DEFAULT_LOG, "Successfully rebuilt ledger tables"); } for (auto let : toRebuild) @@ -328,29 +335,29 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild) mStatusManager = std::make_unique(); mAppConnector = std::make_unique(*this); - if (getConfig().MODE_USES_IN_MEMORY_LEDGER) + if (mConfig.ENTRY_CACHE_SIZE < 20000) { - resetLedgerState(); + LOG_WARNING(DEFAULT_LOG, + "ENTRY_CACHE_SIZE({}) is below the recommended minimum " + "of 20000", + mConfig.ENTRY_CACHE_SIZE); } - else - { - if (mConfig.ENTRY_CACHE_SIZE < 20000) - { - LOG_WARNING(DEFAULT_LOG, - "ENTRY_CACHE_SIZE({}) is below the recommended minimum " - "of 20000", - mConfig.ENTRY_CACHE_SIZE); - } - mLedgerTxnRoot = std::make_unique( - *this, mConfig.ENTRY_CACHE_SIZE, mConfig.PREFETCH_BATCH_SIZE + mLedgerTxnRoot = std::make_unique( + *this, mConfig.ENTRY_CACHE_SIZE, mConfig.PREFETCH_BATCH_SIZE #ifdef BEST_OFFER_DEBUGGING - , - 
mConfig.BEST_OFFER_DEBUGGING_ENABLED + , + mConfig.BEST_OFFER_DEBUGGING_ENABLED #endif - ); + ); - BucketListIsConsistentWithDatabase::registerInvariant(*this); +#ifdef BUILD_TESTS + if (getConfig().MODE_USES_IN_MEMORY_LEDGER) + { + resetLedgerState(); } +#endif + + BucketListIsConsistentWithDatabase::registerInvariant(*this); AccountSubEntriesCountIsValid::registerInvariant(*this); ConservationOfLumens::registerInvariant(*this); @@ -383,6 +390,7 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild) void ApplicationImpl::resetLedgerState() { +#ifdef BUILD_TESTS if (getConfig().MODE_USES_IN_MEMORY_LEDGER) { mNeverCommittingLedgerTxn.reset(); @@ -392,9 +400,10 @@ ApplicationImpl::resetLedgerState() #endif ); mNeverCommittingLedgerTxn = std::make_unique( - *mInMemoryLedgerTxnRoot, getDatabase()); + *mInMemoryLedgerTxnRoot, getDatabase(), mLedgerTxnRoot.get()); } else +#endif { auto& lsRoot = getLedgerTxnRoot(); lsRoot.deleteObjectsModifiedOnOrAfterLedger(0); @@ -693,18 +702,6 @@ ApplicationImpl::~ApplicationImpl() LOG_INFO(DEFAULT_LOG, "Application destroyed"); } -void -ApplicationImpl::resetDBForInMemoryMode() -{ - // Load the peer information and reinitialize the DB - auto& pm = getOverlayManager().getPeerManager(); - auto peerData = pm.loadAllPeers(); - newDB(); - pm.storePeers(peerData); - - LOG_INFO(DEFAULT_LOG, "In-memory state is reset back to genesis"); -} - uint64_t ApplicationImpl::timeNow() { @@ -737,26 +734,6 @@ ApplicationImpl::validateAndLogConfig() "RUN_STANDALONE is not set"); } - // EXPERIMENTAL_PRECAUTION_DELAY_META is only meaningful when there's a - // METADATA_OUTPUT_STREAM. We only allow EXPERIMENTAL_PRECAUTION_DELAY_META - // on a captive core, without a persistent database; old-style ingestion - // which reads from the core database could do the delaying itself. 
- if (mConfig.METADATA_OUTPUT_STREAM != "" && - mConfig.EXPERIMENTAL_PRECAUTION_DELAY_META && !mConfig.isInMemoryMode()) - { - throw std::invalid_argument( - "Using a METADATA_OUTPUT_STREAM with " - "EXPERIMENTAL_PRECAUTION_DELAY_META set to true " - "requires --in-memory"); - } - - if (mConfig.isInMemoryMode()) - { - CLOG_WARNING( - Bucket, - "in-memory mode is enabled. This feature is deprecated! Node " - "may see performance degredation and lose sync with the network."); - } if (!mDatabase->isSqlite()) { CLOG_WARNING(Database, @@ -822,8 +799,7 @@ ApplicationImpl::validateAndLogConfig() "DEPRECATED_SQL_LEDGER_STATE set to false but " "deprecated SQL ledger state is active. To disable deprecated " "SQL ledger state, " - "MODE_ENABLES_BUCKETLIST must be set and --in-memory flag " - "must not be used."); + "MODE_ENABLES_BUCKETLIST must be set."); } } @@ -874,13 +850,6 @@ ApplicationImpl::validateAndLogConfig() } } - if (isNetworkedValidator && mConfig.isInMemoryMode()) - { - throw std::invalid_argument( - "In-memory mode is set, NODE_IS_VALIDATOR is set, " - "and RUN_STANDALONE is not set"); - } - if (getHistoryArchiveManager().publishEnabled()) { if (!mConfig.modeStoresAllHistory()) @@ -1634,8 +1603,15 @@ AbstractLedgerTxnParent& ApplicationImpl::getLedgerTxnRoot() { releaseAssert(threadIsMain()); - return mConfig.MODE_USES_IN_MEMORY_LEDGER ? 
*mNeverCommittingLedgerTxn - : *mLedgerTxnRoot; + +#ifdef BUILD_TESTS + if (mConfig.MODE_USES_IN_MEMORY_LEDGER) + { + return *mNeverCommittingLedgerTxn; + } +#endif + + return *mLedgerTxnRoot; } AppConnector& diff --git a/src/main/ApplicationImpl.h b/src/main/ApplicationImpl.h index a7553214f9..e1c0afd73d 100644 --- a/src/main/ApplicationImpl.h +++ b/src/main/ApplicationImpl.h @@ -135,8 +135,6 @@ class ApplicationImpl : public Application virtual AbstractLedgerTxnParent& getLedgerTxnRoot() override; - virtual void resetDBForInMemoryMode() override; - private: VirtualClock& mVirtualClock; Config mConfig; @@ -194,8 +192,10 @@ class ApplicationImpl : public Application // is held in the never-committing LedgerTxn in its entirety -- so if it // ever grows beyond RAM-size you need to use a mode with some sort of // database on secondary storage. +#ifdef BUILD_TESTS std::unique_ptr mInMemoryLedgerTxnRoot; std::unique_ptr mNeverCommittingLedgerTxn; +#endif std::unique_ptr mCommandHandler; diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index 26e5343ef6..bbc14086a6 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -114,79 +114,9 @@ minimalDbPath(Config const& cfg) return dpath; } -void -setupMinimalDBForInMemoryMode(Config const& cfg, uint32_t startAtLedger) -{ - releaseAssertOrThrow(cfg.isInMemoryMode()); - - VirtualClock clock; - Application::pointer app; - - // Look for an existing minimal database, and see if it's possible to - // restore ledger state from buckets. If it is not possible, reset the - // existing database back to genesis. If the minimal database does not - // exist, create a new one. 
- bool found = false; - - auto cfgToCheckDB = cfg; - cfgToCheckDB.METADATA_OUTPUT_STREAM = ""; - - if (std::filesystem::exists(minimalDbPath(cfg))) - { - app = Application::create(clock, cfgToCheckDB, /* newDB */ false); - found = true; - } - else - { - LOG_INFO(DEFAULT_LOG, "Minimal database not found, creating one..."); - app = Application::create(clock, cfgToCheckDB, /* newDB */ true); - } - - // Rebuild the state from scratch if: - // - --start-at-ledger was not provided - // - target catchup ledger is before LCL - // - target catchup ledger is too far ahead of LCL - // In all other cases, attempt restoring the ledger states via - // local bucket application - if (found) - { - LOG_INFO(DEFAULT_LOG, "Found the existing minimal database"); - - // DB state might be set to 0 if core previously exited while rebuilding - // state. In this case, we want to rebuild the DB from scratch - bool rebuildDB = - app->getLedgerManager().getLastClosedLedgerHAS().currentLedger < - LedgerManager::GENESIS_LEDGER_SEQ; - - if (!rebuildDB) - { - // Ledger state is not yet ready during this setup step - app->getLedgerManager().loadLastKnownLedger( - /* restoreBucketlist */ false, /* isLedgerStateReady */ false); - auto lcl = app->getLedgerManager().getLastClosedLedgerNum(); - LOG_INFO(DEFAULT_LOG, "Current in-memory state, got LCL: {}", lcl); - rebuildDB = - !canRebuildInMemoryLedgerFromBuckets(startAtLedger, lcl); - } - - if (rebuildDB) - { - LOG_INFO(DEFAULT_LOG, "Cannot restore the in-memory state, " - "rebuilding the state from scratch"); - app->resetDBForInMemoryMode(); - } - } -} - Application::pointer -setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, - std::string const& startAtHash) +setupApp(Config& cfg, VirtualClock& clock) { - if (cfg.isInMemoryMode()) - { - setupMinimalDBForInMemoryMode(cfg, startAtLedger); - } - LOG_INFO(DEFAULT_LOG, "Starting stellar-core {}", STELLAR_CORE_VERSION); Application::pointer app; app = Application::create(clock, cfg, false); 
@@ -195,13 +125,14 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, return nullptr; } - // With in-memory mode, ledger state is not yet ready during this setup step + // With in-memory testing mode, ledger state is not yet ready during this + // setup step app->getLedgerManager().loadLastKnownLedger( /* restoreBucketlist */ false, - /* isLedgerStateReady */ !cfg.isInMemoryMode()); + /* isLedgerStateReady */ !cfg.MODE_USES_IN_MEMORY_LEDGER); auto lcl = app->getLedgerManager().getLastClosedLedgerHeader(); - if (cfg.isInMemoryMode() && + if (cfg.MODE_USES_IN_MEMORY_LEDGER && lcl.header.ledgerSeq == LedgerManager::GENESIS_LEDGER_SEQ) { // If ledger is genesis, rebuild genesis state from buckets @@ -211,67 +142,6 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, } } - bool doCatchupForInMemoryMode = - cfg.isInMemoryMode() && startAtLedger != 0 && !startAtHash.empty(); - if (doCatchupForInMemoryMode) - { - // At this point, setupApp has either confirmed that we can rebuild from - // the existing buckets, or reset the DB to genesis - if (lcl.header.ledgerSeq != LedgerManager::GENESIS_LEDGER_SEQ) - { - auto lclHashStr = binToHex(lcl.hash); - if (lcl.header.ledgerSeq == startAtLedger && - lclHashStr != startAtHash) - { - LOG_ERROR(DEFAULT_LOG, - "Provided hash {} does not agree with stored hash {}", - startAtHash, lclHashStr); - return nullptr; - } - - auto has = app->getLedgerManager().getLastClosedLedgerHAS(); - - // Collect bucket references to pass to catchup _before_ starting - // the app, which may trigger garbage collection - std::set> retained; - for (auto const& b : has.allBuckets()) - { - auto bPtr = app->getBucketManager().getBucketByHash( - hexToBin256(b)); - releaseAssert(bPtr); - retained.insert(bPtr); - } - - // Start the app with LCL set to 0 - app->getLedgerManager().setupInMemoryStateRebuild(); - app->start(); - - // Set Herder to track the actual LCL - app->getHerder().setTrackingSCPState(lcl.header.ledgerSeq, - 
lcl.header.scpValue, true); - - // Schedule the catchup work that will rebuild state - auto cc = CatchupConfiguration(has, lcl); - app->getLedgerManager().startCatchup(cc, /* archive */ nullptr, - retained); - } - else - { - LedgerNumHashPair pair; - pair.first = startAtLedger; - pair.second = std::optional(hexToBin256(startAtHash)); - auto mode = CatchupConfiguration::Mode::OFFLINE_BASIC; - Json::Value catchupInfo; - int res = - catchup(app, CatchupConfiguration{pair, 0, mode}, catchupInfo, - /* archive */ nullptr); - if (res != 0) - { - return nullptr; - } - } - } - return app; } diff --git a/src/main/ApplicationUtils.h b/src/main/ApplicationUtils.h index 30d2cb0fed..140626bc7f 100644 --- a/src/main/ApplicationUtils.h +++ b/src/main/ApplicationUtils.h @@ -15,9 +15,7 @@ namespace stellar class CatchupConfiguration; // Create application and validate its configuration -Application::pointer setupApp(Config& cfg, VirtualClock& clock, - uint32_t startAtLedger, - std::string const& startAtHash); +Application::pointer setupApp(Config& cfg, VirtualClock& clock); int runApp(Application::pointer app); void setForceSCPFlag(); void initializeDatabase(Config cfg); diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp index 0fa277417c..5622fdac23 100644 --- a/src/main/CommandLine.cpp +++ b/src/main/CommandLine.cpp @@ -353,54 +353,6 @@ maybeSetMetadataOutputStream(Config& cfg, std::string const& stream) } } -void -maybeEnableInMemoryMode(Config& config, bool inMemory, uint32_t startAtLedger, - std::string const& startAtHash, bool persistMinimalData) -{ - // First, ensure user parameters are valid - if (!inMemory) - { - if (startAtLedger != 0) - { - throw std::runtime_error("--start-at-ledger requires --in-memory"); - } - if (!startAtHash.empty()) - { - throw std::runtime_error("--start-at-hash requires --in-memory"); - } - return; - } - if (startAtLedger != 0 && startAtHash.empty()) - { - throw std::runtime_error("--start-at-ledger requires --start-at-hash"); - } - 
else if (startAtLedger == 0 && !startAtHash.empty()) - { - throw std::runtime_error("--start-at-hash requires --start-at-ledger"); - } - - // Adjust configs for live in-memory-replay mode - config.setInMemoryMode(); - - if (startAtLedger != 0 && !startAtHash.empty()) - { - config.MODE_AUTO_STARTS_OVERLAY = false; - } - - // Set database to a small sqlite database used to store minimal data needed - // to restore the ledger state - if (persistMinimalData) - { - config.DATABASE = SecretValue{minimalDBForInMemoryMode(config)}; - config.MODE_STORES_HISTORY_LEDGERHEADERS = true; - // Since this mode stores historical data (needed to restore - // ledger state in certain scenarios), set maintenance to run - // aggressively so that we only store a few ledgers worth of data - config.AUTOMATIC_MAINTENANCE_PERIOD = std::chrono::seconds(30); - config.AUTOMATIC_MAINTENANCE_COUNT = MAINTENANCE_LEDGER_COUNT; - } -} - clara::Opt ledgerHashParser(std::string& ledgerHash) { @@ -419,23 +371,21 @@ clara::Opt inMemoryParser(bool& inMemory) { return clara::Opt{inMemory}["--in-memory"]( - "(DEPRECATED) store working ledger in memory rather than database"); + "(DEPRECATED) flag is ignored and will be removed soon."); } clara::Opt startAtLedgerParser(uint32_t& startAtLedger) { return clara::Opt{startAtLedger, "LEDGER"}["--start-at-ledger"]( - "(DEPRECATED) start in-memory run with replay from historical ledger " - "number"); + "(DEPRECATED) flag is ignored and will be removed soon."); } clara::Opt startAtHashParser(std::string& startAtHash) { return clara::Opt{startAtHash, "HASH"}["--start-at-hash"]( - "(DEPRECATED) start in-memory run with replay from historical ledger " - "hash"); + "(DEPRECATED) flag is ignored and will be removed soon."); } clara::Opt @@ -870,7 +820,8 @@ runCatchup(CommandLineArgs const& args) trustedCheckpointHashesParser(trustedCheckpointHashesFile), outputFileParser(outputFile), disableBucketGCParser(disableBucketGC), validationParser(completeValidation), 
inMemoryParser(inMemory), - ledgerHashParser(hash), forceUntrustedCatchup(forceUntrusted), + ledgerHashParser(hash), ledgerHashParser(hash), + forceUntrustedCatchup(forceUntrusted), metadataOutputStreamParser(stream)}, [&] { auto config = configOption.getConfig(); @@ -891,10 +842,6 @@ runCatchup(CommandLineArgs const& args) config.AUTOMATIC_MAINTENANCE_COUNT = MAINTENANCE_LEDGER_COUNT; } - // --start-at-ledger and --start-at-hash aren't allowed in catchup, - // so pass defaults values - maybeEnableInMemoryMode(config, inMemory, 0, "", - /* persistMinimalData */ false); maybeSetMetadataOutputStream(config, stream); VirtualClock clock(VirtualClock::REAL_TIME); @@ -1221,13 +1168,12 @@ int runNewDB(CommandLineArgs const& args) { CommandLine::ConfigOption configOption; - bool minimalForInMemoryMode = false; + [[maybe_unused]] bool minimalForInMemoryMode = false; auto minimalDBParser = [](bool& minimalForInMemoryMode) { return clara::Opt{ minimalForInMemoryMode}["--minimal-for-in-memory-mode"]( - "Reset the special database used only for in-memory mode (see " - "--in-memory flag"); + "(DEPRECATED) flag is ignored and will be removed soon."); }; return runWithHelp(args, @@ -1235,11 +1181,6 @@ runNewDB(CommandLineArgs const& args) minimalDBParser(minimalForInMemoryMode)}, [&] { auto cfg = configOption.getConfig(); - if (minimalForInMemoryMode) - { - cfg.DATABASE = - SecretValue{minimalDBForInMemoryMode(cfg)}; - } initializeDatabase(cfg); return 0; }); @@ -1535,10 +1476,10 @@ run(CommandLineArgs const& args) CommandLine::ConfigOption configOption; auto disableBucketGC = false; std::string stream; - bool inMemory = false; bool waitForConsensus = false; - uint32_t startAtLedger = 0; - std::string startAtHash; + [[maybe_unused]] bool inMemory = false; + [[maybe_unused]] uint32_t startAtLedger = 0; + [[maybe_unused]] std::string startAtHash; return runWithHelp( args, @@ -1564,14 +1505,10 @@ run(CommandLineArgs const& args) { cfg.DATABASE = SecretValue{"sqlite3://:memory:"}; 
cfg.MODE_STORES_HISTORY_MISC = false; - cfg.MODE_USES_IN_MEMORY_LEDGER = false; cfg.MODE_ENABLES_BUCKETLIST = false; cfg.PREFETCH_BATCH_SIZE = 0; } - maybeEnableInMemoryMode(cfg, inMemory, startAtLedger, - startAtHash, - /* persistMinimalData */ true); maybeSetMetadataOutputStream(cfg, stream); cfg.FORCE_SCP = cfg.NODE_IS_VALIDATOR ? !waitForConsensus : false; @@ -1612,10 +1549,8 @@ run(CommandLineArgs const& args) } // Second, setup the app with the final configuration. - // Note that when in in-memory mode, additional setup may be - // required (such as database reset, catchup, etc) clock = std::make_shared(clockMode); - app = setupApp(cfg, *clock, startAtLedger, startAtHash); + app = setupApp(cfg, *clock); if (!app) { LOG_ERROR(DEFAULT_LOG, diff --git a/src/main/Config.cpp b/src/main/Config.cpp index a932d9217a..c7e50a13a8 100644 --- a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -117,7 +117,6 @@ Config::Config() : NODE_SEED(SecretKey::random()) // non configurable MODE_ENABLES_BUCKETLIST = true; - MODE_USES_IN_MEMORY_LEDGER = false; MODE_STORES_HISTORY_MISC = true; MODE_STORES_HISTORY_LEDGERHEADERS = true; MODE_DOES_CATCHUP = true; @@ -157,7 +156,6 @@ Config::Config() : NODE_SEED(SecretKey::random()) MANUAL_CLOSE = false; CATCHUP_COMPLETE = false; CATCHUP_RECENT = 0; - EXPERIMENTAL_PRECAUTION_DELAY_META = false; BACKGROUND_OVERLAY_PROCESSING = true; DEPRECATED_SQL_LEDGER_STATE = false; BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 14; // 2^14 == 16 kb @@ -307,6 +305,7 @@ Config::Config() : NODE_SEED(SecretKey::random()) #ifdef BUILD_TESTS TEST_CASES_ENABLED = false; CATCHUP_SKIP_KNOWN_RESULTS_FOR_TESTING = false; + MODE_USES_IN_MEMORY_LEDGER = false; #endif #ifdef BEST_OFFER_DEBUGGING @@ -1057,10 +1056,6 @@ Config::processConfig(std::shared_ptr t) [&]() { DISABLE_XDR_FSYNC = readBool(item); }}, {"METADATA_OUTPUT_STREAM", [&]() { METADATA_OUTPUT_STREAM = readString(item); }}, - {"EXPERIMENTAL_PRECAUTION_DELAY_META", - [&]() { - EXPERIMENTAL_PRECAUTION_DELAY_META 
= readBool(item); - }}, {"EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING", [&]() { CLOG_WARNING(Overlay, @@ -2271,29 +2266,12 @@ Config::getExpectedLedgerCloseTime() const return Herder::EXP_LEDGER_TIMESPAN_SECONDS; } -void -Config::setInMemoryMode() -{ - MODE_USES_IN_MEMORY_LEDGER = true; - DATABASE = SecretValue{"sqlite3://:memory:"}; - MODE_STORES_HISTORY_MISC = false; - MODE_STORES_HISTORY_LEDGERHEADERS = false; - MODE_ENABLES_BUCKETLIST = true; - BACKGROUND_EVICTION_SCAN = false; -} - bool Config::modeDoesCatchupWithBucketList() const { return MODE_DOES_CATCHUP && MODE_ENABLES_BUCKETLIST; } -bool -Config::isInMemoryMode() const -{ - return MODE_USES_IN_MEMORY_LEDGER; -} - bool Config::isUsingBucketListDB() const { @@ -2313,12 +2291,6 @@ Config::isPersistingBucketListDBIndexes() const return isUsingBucketListDB() && BUCKETLIST_DB_PERSIST_INDEX; } -bool -Config::isInMemoryModeWithoutMinimalDB() const -{ - return MODE_USES_IN_MEMORY_LEDGER && !MODE_STORES_HISTORY_LEDGERHEADERS; -} - bool Config::modeStoresAllHistory() const { diff --git a/src/main/Config.h b/src/main/Config.h index df88ed4184..e8ab848765 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -143,31 +143,24 @@ class Config : public std::enable_shared_from_this // via applying valid TXs or manually adding entries to the BucketList. // BucketList state is not preserved over restarts. If this mode can be // used, it should be. - // 2. TESTDB_IN_MEMORY_NO_OFFERS: allows arbitrary ledger state writes via - // ltx root commits, but does not test the offers table. Suitable for + // 2. TESTDB_IN_MEMORY: allows arbitrary ledger state writes via + // ltx root commits. Suitable for // tests that required writes to the ledger state that cannot be achieved // via valid TX application, such as testing invalid TX error codes or // low level op testing. - // 3. TESTDB_IN_MEMORY_OFFERS: The same as TESTDB_IN_MEMORY_NO_OFFERS, but - // tests the offers table. Suitable for testing ops that interact with - // offers. 
- // 4. TESTDB_ON_DISK_SQLITE: Should only be used to test SQLITE specific + // 3. TESTDB_POSTGRESQL: Should only be used to test POSTGRESQL specific // database operations. - // 5. TESTDB_POSTGRESQL: Should only be used to test POSTGRESQL specific - // database operations. - // 6. TESTDB_BUCKET_DB_PERSISTENT: Same as TESTDB_BUCKET_DB_VOLATILE, but - // persists the BucketList over restart. This mode is very slow and - // should only be used for testing restart behavior or some low level - // BucketList features. + // 4. TESTDB_BUCKET_DB_PERSISTENT: Same as TESTDB_BUCKET_DB_VOLATILE, but + // persists the BucketList and SQL DB over restart. This mode is very + // slow and should only be used for testing restart behavior or some low + // level BucketList features or for testing SQLite DB specific behavior. enum TestDbMode { TESTDB_DEFAULT, - TESTDB_IN_MEMORY_OFFERS, - TESTDB_ON_DISK_SQLITE, + TESTDB_IN_MEMORY, #ifdef USE_POSTGRES TESTDB_POSTGRESQL, #endif - TESTDB_IN_MEMORY_NO_OFFERS, TESTDB_BUCKET_DB_VOLATILE, TESTDB_BUCKET_DB_PERSISTENT, TESTDB_MODES @@ -384,12 +377,6 @@ class Config : public std::enable_shared_from_this // be set to `false` only for testing purposes. bool MODE_ENABLES_BUCKETLIST; - // A config parameter that uses a never-committing ledger. This means that - // all ledger entries will be kept in memory, and not persisted to DB - // (relevant tables won't even be created). This should not be set for - // production validators. - bool MODE_USES_IN_MEMORY_LEDGER; - // A config parameter that can be set to true (in a captive-core // configuration) to delay emitting metadata by one ledger. bool EXPERIMENTAL_PRECAUTION_DELAY_META; @@ -705,6 +692,11 @@ class Config : public std::enable_shared_from_this // doing a graceful shutdown bool TEST_CASES_ENABLED; + // A config parameter that uses a never-committing ledger. This means that + // all ledger entries will be kept in memory, and not persisted to DB. + // Should only be used for testing. 
+ bool MODE_USES_IN_MEMORY_LEDGER; + // Set QUORUM_SET using automatic quorum set configuration based on // `validators`. void @@ -737,10 +729,7 @@ class Config : public std::enable_shared_from_this std::chrono::seconds getExpectedLedgerCloseTime() const; - void setInMemoryMode(); bool modeDoesCatchupWithBucketList() const; - bool isInMemoryMode() const; - bool isInMemoryModeWithoutMinimalDB() const; bool isUsingBucketListDB() const; bool isUsingBackgroundEviction() const; bool isPersistingBucketListDBIndexes() const; diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp index b36f9bf732..a7128d4d87 100644 --- a/src/main/test/ApplicationUtilsTests.cpp +++ b/src/main/test/ApplicationUtilsTests.cpp @@ -2,12 +2,14 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/test/BucketTestUtils.h" #include "crypto/Random.h" #include "history/HistoryArchiveManager.h" #include "history/HistoryManagerImpl.h" #include "history/test/HistoryTestsUtils.h" #include "invariant/BucketListIsConsistentWithDatabase.h" #include "ledger/LedgerTxn.h" +#include "ledger/test/LedgerTestUtils.h" #include "lib/catch.hpp" #include "main/Application.h" #include "main/ApplicationUtils.h" @@ -52,45 +54,6 @@ class TemporaryFileDamager } }; -class TemporarySQLiteDBDamager : public TemporaryFileDamager -{ - Config mConfig; - static std::filesystem::path - getSQLiteDBPath(Config const& cfg) - { - auto str = cfg.DATABASE.value; - std::string prefix = "sqlite3://"; - REQUIRE(str.find(prefix) == 0); - str = str.substr(prefix.size()); - REQUIRE(!str.empty()); - std::filesystem::path path(str); - REQUIRE(std::filesystem::exists(path)); - return path; - } - - public: - TemporarySQLiteDBDamager(Config const& cfg) - : TemporaryFileDamager(getSQLiteDBPath(cfg)), mConfig(cfg) - { - } - void - damageVictim() override - { - // Damage a database by bumping the 
root account's last-modified. - VirtualClock clock; - auto app = createTestApplication(clock, mConfig, /*newDB=*/false); - LedgerTxn ltx(app->getLedgerTxnRoot(), - /*shouldUpdateLastModified=*/false); - { - auto rootKey = accountKey( - stellar::txtest::getRoot(app->getNetworkID()).getPublicKey()); - auto rootLe = ltx.load(rootKey); - rootLe.current().lastModifiedLedgerSeq += 1; - } - ltx.commit(); - } -}; - // Logic to check the state of the bucket list with the state of the DB static bool checkState(Application& app) @@ -108,7 +71,7 @@ checkState(Application& app) blcOk = false; } - if (app.getConfig().isUsingBucketListDB()) + if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER) { auto checkBucket = [&blcOk](auto b) { if (!b->isEmpty() && !b->isIndexed()) @@ -307,82 +270,6 @@ class SimulationHelper { mSimulation->removeNode(mTestNodeID); } - - void - runStartupTest(bool triggerCatchup, uint32_t startFromLedger, - std::string startFromHash, uint32_t lclLedgerSeq) - { - bool isInMemoryMode = startFromLedger != 0 && !startFromHash.empty(); - if (isInMemoryMode) - { - REQUIRE(canRebuildInMemoryLedgerFromBuckets(startFromLedger, - lclLedgerSeq)); - } - - uint32_t checkpointFrequency = 8; - - // Depending on how many ledgers we buffer during bucket - // apply, core might trim some and only keep checkpoint - // ledgers. In this case, after bucket application, normal - // catchup will be triggered. - uint32_t delayBuckets = triggerCatchup ? 
(2 * checkpointFrequency) - : (checkpointFrequency / 2); - mTestCfg.ARTIFICIALLY_DELAY_BUCKET_APPLICATION_FOR_TESTING = - std::chrono::seconds(delayBuckets); - - // Start test app - auto app = mSimulation->addNode(mTestCfg.NODE_SEED, mQuorum, &mTestCfg, - false, startFromLedger, startFromHash); - mSimulation->addPendingConnection(mMainNodeID, mTestNodeID); - REQUIRE(app); - mSimulation->startAllNodes(); - - // Ensure nodes are connected - if (!app->getConfig().MODE_AUTO_STARTS_OVERLAY) - { - app->getOverlayManager().start(); - } - - if (isInMemoryMode) - { - REQUIRE(app->getLedgerManager().getState() == - LedgerManager::LM_CATCHING_UP_STATE); - } - - auto downloaded = - app->getCatchupManager().getCatchupMetrics().mCheckpointsDownloaded; - - Upgrades::UpgradeParameters scheduledUpgrades; - scheduledUpgrades.mUpgradeTime = - VirtualClock::from_time_t(mMainNode->getLedgerManager() - .getLastClosedLedgerHeader() - .header.scpValue.closeTime); - scheduledUpgrades.mProtocolVersion = - static_cast(SOROBAN_PROTOCOL_VERSION); - mMainNode->getHerder().setUpgrades(scheduledUpgrades); - - generateLoad(false); - generateLoad(true); - - // State has been rebuilt and node is properly in sync - REQUIRE(checkState(*app)); - REQUIRE(app->getLedgerManager().getLastClosedLedgerNum() + 1 >= - getMainNodeLCL().header.ledgerSeq); - REQUIRE(app->getLedgerManager().isSynced()); - - if (triggerCatchup) - { - REQUIRE(downloaded < app->getCatchupManager() - .getCatchupMetrics() - .mCheckpointsDownloaded); - } - else - { - REQUIRE(downloaded == app->getCatchupManager() - .getCatchupMetrics() - .mCheckpointsDownloaded); - } - } }; TEST_CASE("verify checkpoints command - wait condition", "[applicationutils]") @@ -399,7 +286,7 @@ TEST_CASE("verify checkpoints command - wait condition", "[applicationutils]") qSet.validators.push_back(vNode1NodeID); Config cfg1 = getTestConfig(1); - Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg2 = getTestConfig(2, 
Config::TESTDB_IN_MEMORY); cfg2.FORCE_SCP = false; cfg2.NODE_IS_VALIDATOR = false; cfg2.MODE_DOES_CATCHUP = false; @@ -446,8 +333,8 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]") // Step 2: make a new application and catch it up part-way to the // archives (but behind). auto app = catchupSimulation.createCatchupApplication( - std::numeric_limits::max(), Config::TESTDB_ON_DISK_SQLITE, - "client"); + std::numeric_limits::max(), + Config::TESTDB_BUCKET_DB_PERSISTENT, "client"); catchupSimulation.catchupOffline(app, l1); chkConfig = app->getConfig(); victimBucketPath = app->getBucketManager() @@ -488,146 +375,14 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]") damage.damageVictim(); REQUIRE(selfCheck(chkConfig) == 1); } - { - // Damage the SQL ledger. - TemporarySQLiteDBDamager damage(chkConfig); - damage.damageVictim(); - REQUIRE(selfCheck(chkConfig) == 1); - } } TEST_CASE("application setup", "[applicationutils]") { VirtualClock clock; - - SECTION("SQL DB mode") - { - auto cfg = getTestConfig(); - auto app = setupApp(cfg, clock, 0, ""); - REQUIRE(checkState(*app)); - } - - auto testInMemoryMode = [&](Config& cfg1, Config& cfg2) { - // Publish a few checkpoints then shut down test node - auto simulation = SimulationHelper(cfg1, cfg2); - auto [startFromLedger, startFromHash] = - simulation.publishCheckpoints(2); - auto lcl = simulation.getTestNodeLCL(); - simulation.shutdownTestNode(); - - SECTION("minimal DB setup") - { - SECTION("not found") - { - // Remove `buckets` dir completely - fs::deltree(cfg2.BUCKET_DIR_PATH); - - // Initialize new minimal DB from scratch - auto app = setupApp(cfg2, clock, 0, ""); - REQUIRE(app); - REQUIRE(checkState(*app)); - } - SECTION("found") - { - // Found existing minimal DB, reset to genesis - auto app = setupApp(cfg2, clock, 0, ""); - REQUIRE(app); - REQUIRE(checkState(*app)); - } - } - SECTION("rebuild state") - { - SECTION("from buckets") - { - auto selectedLedger = 
lcl.header.ledgerSeq; - auto selectedHash = binToHex(lcl.hash); - - SECTION("replay buffered ledgers") - { - simulation.runStartupTest(false, selectedLedger, - selectedHash, - lcl.header.ledgerSeq); - } - SECTION("trigger catchup") - { - simulation.runStartupTest(true, selectedLedger, - selectedHash, - lcl.header.ledgerSeq); - } - SECTION("start from future ledger") - { - // Validator publishes more checkpoints while the - // captive-core instance is shutdown - auto [selectedLedger2, selectedHash2] = - simulation.publishCheckpoints(4); - simulation.runStartupTest(true, selectedLedger2, - selectedHash2, - lcl.header.ledgerSeq); - } - } - SECTION("via catchup") - { - // startAtLedger is behind LCL, reset to genesis and catchup - REQUIRE(!canRebuildInMemoryLedgerFromBuckets( - startFromLedger, lcl.header.ledgerSeq)); - auto app = - setupApp(cfg2, clock, startFromLedger, startFromHash); - REQUIRE(app); - REQUIRE(checkState(*app)); - REQUIRE(app->getLedgerManager().getLastClosedLedgerNum() == - startFromLedger); - REQUIRE(app->getLedgerManager().getState() == - LedgerManager::LM_CATCHING_UP_STATE); - } - - SECTION("bad hash") - { - // Create mismatch between start-from ledger and hash - auto app = - setupApp(cfg2, clock, startFromLedger + 1, startFromHash); - REQUIRE(!app); - } - } - SECTION("set meta stream") - { - TmpDirManager tdm(std::string("streamtmp-") + - binToHex(randomBytes(8))); - TmpDir td = tdm.tmpDir("streams"); - std::string path = td.getName() + "/stream.xdr"; - - // Remove `buckets` dir completely to ensure multiple apps are - // initialized during setup - fs::deltree(cfg2.BUCKET_DIR_PATH); - SECTION("file path") - { - cfg2.METADATA_OUTPUT_STREAM = path; - - auto app = setupApp(cfg2, clock, 0, ""); - REQUIRE(app); - REQUIRE(checkState(*app)); - } -#ifdef _WIN32 -#else - SECTION("fd") - { - int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0644); - REQUIRE(fd != -1); - cfg2.METADATA_OUTPUT_STREAM = fmt::format("fd:{}", fd); - - auto app = setupApp(cfg2, 
clock, 0, ""); - REQUIRE(app); - REQUIRE(checkState(*app)); - } -#endif - } - }; - SECTION("in memory mode") - { - Config cfg1 = getTestConfig(1); - Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY_NO_OFFERS); - cfg2.DATABASE = SecretValue{minimalDBForInMemoryMode(cfg2)}; - testInMemoryMode(cfg1, cfg2); - } + auto cfg = getTestConfig(); + auto app = setupApp(cfg, clock); + REQUIRE(checkState(*app)); } TEST_CASE("application major version numbers", "[applicationutils]") diff --git a/src/main/test/ExternalQueueTests.cpp b/src/main/test/ExternalQueueTests.cpp deleted file mode 100644 index c44713ea7f..0000000000 --- a/src/main/test/ExternalQueueTests.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2014 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "lib/catch.hpp" -#include "main/Application.h" -#include "main/CommandHandler.h" -#include "main/Config.h" -#include "main/ExternalQueue.h" -#include "simulation/Simulation.h" -#include "test/TestUtils.h" -#include "test/test.h" - -using namespace stellar; - -TEST_CASE("cursors", "[externalqueue]") -{ - VirtualClock clock; - Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE); - Application::pointer app = createTestApplication(clock, cfg); - - ExternalQueue ps(*app); - std::map curMap; - app->getCommandHandler().manualCmd("setcursor?id=FOO&cursor=123"); - app->getCommandHandler().manualCmd("setcursor?id=BAR&cursor=456"); - - SECTION("get non-existent cursor") - { - ps.getCursorForResource("NONEXISTENT", curMap); - REQUIRE(curMap.size() == 0); - } - - SECTION("get single cursor") - { - ps.getCursorForResource("FOO", curMap); - REQUIRE(curMap.size() == 1); - } - - SECTION("get all cursors") - { - ps.getCursorForResource("", curMap); - REQUIRE(curMap.size() == 2); - } -} diff --git a/src/overlay/test/OverlayTests.cpp 
b/src/overlay/test/OverlayTests.cpp index 9c04a429a1..942f0a64a9 100644 --- a/src/overlay/test/OverlayTests.cpp +++ b/src/overlay/test/OverlayTests.cpp @@ -140,8 +140,8 @@ TEST_CASE("flow control byte capacity", "[overlay][flowcontrol]") { VirtualClock clock; - auto cfg1 = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); - auto cfg2 = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg1 = getTestConfig(0, Config::TESTDB_IN_MEMORY); + auto cfg2 = getTestConfig(1, Config::TESTDB_IN_MEMORY); REQUIRE(cfg1.PEER_FLOOD_READING_CAPACITY != cfg1.PEER_FLOOD_READING_CAPACITY_BYTES); diff --git a/src/simulation/Simulation.cpp b/src/simulation/Simulation.cpp index f6b91cbf2e..356e1dc4c1 100644 --- a/src/simulation/Simulation.cpp +++ b/src/simulation/Simulation.cpp @@ -91,8 +91,7 @@ Simulation::setCurrentVirtualTime(VirtualClock::system_time_point t) Application::pointer Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2, - bool newDB, uint32_t startAtLedger, - std::string const& startAtHash) + bool newDB) { auto cfg = cfg2 ? std::make_shared(*cfg2) : std::make_shared(newConfig()); @@ -140,7 +139,7 @@ Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2, } else { - app = setupApp(*cfg, *clock, startAtLedger, startAtHash); + app = setupApp(*cfg, *clock); } mNodes.emplace(nodeKey.getPublicKey(), Node{clock, app}); diff --git a/src/simulation/Simulation.h b/src/simulation/Simulation.h index 8743af37f2..e1385f374d 100644 --- a/src/simulation/Simulation.h +++ b/src/simulation/Simulation.h @@ -50,9 +50,8 @@ class Simulation // Add new node to the simulation. This function does not start the node. // Callers are expected to call `start` or `startAllNodes` manually. 
Application::pointer addNode(SecretKey nodeKey, SCPQuorumSet qSet, - Config const* cfg = nullptr, bool newDB = true, - uint32_t startAtLedger = 0, - std::string const& startAtHash = ""); + Config const* cfg = nullptr, + bool newDB = true); Application::pointer getNode(NodeID nodeID); std::vector getNodes(); std::vector getNodeIDs(); diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp index a4305b03ab..672c8acf22 100644 --- a/src/test/TestUtils.cpp +++ b/src/test/TestUtils.cpp @@ -185,7 +185,8 @@ genesis(int minute, int second) void upgradeSorobanNetworkConfig(std::function modifyFn, - std::shared_ptr simulation) + std::shared_ptr simulation, + bool applyUpgrade) { auto nodes = simulation->getNodes(); auto& lg = nodes[0]->getLoadGenerator(); @@ -247,13 +248,17 @@ upgradeSorobanNetworkConfig(std::function modifyFn, scheduledUpgrades.mConfigUpgradeSetKey = upgradeSetKey; app->getHerder().setUpgrades(scheduledUpgrades); } - // Wait for upgrade to be applied - simulation->crankUntil( - [&]() { - auto netCfg = app.getLedgerManager().getSorobanNetworkConfig(); - return netCfg == cfg; - }, - 2 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); + + if (applyUpgrade) + { + // Wait for upgrade to be applied + simulation->crankUntil( + [&]() { + auto netCfg = app.getLedgerManager().getSorobanNetworkConfig(); + return netCfg == cfg; + }, + 2 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); + } } void diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h index 5bf421efbd..96a7703604 100644 --- a/src/test/TestUtils.h +++ b/src/test/TestUtils.h @@ -113,7 +113,8 @@ void setSorobanNetworkConfigForTest(SorobanNetworkConfig& cfg); void overrideSorobanNetworkConfigForTest(Application& app); void upgradeSorobanNetworkConfig(std::function modifyFn, - std::shared_ptr simulation); + std::shared_ptr simulation, + bool applyUpgrade = true); void modifySorobanNetworkConfig(Application& app, std::function modifyFn); diff --git a/src/test/test.cpp b/src/test/test.cpp index 
9fbb2e7ae4..4499c26a7e 100644 --- a/src/test/test.cpp +++ b/src/test/test.cpp @@ -194,10 +194,10 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode) instanceNumber += gBaseInstance; if (mode == Config::TESTDB_DEFAULT) { - // by default, tests should be run with in memory SQLITE as it's faster - // you can change this by enabling the appropriate line below - // mode = Config::TESTDB_IN_MEMORY_OFFERS; - // mode = Config::TESTDB_ON_DISK_SQLITE; + // by default, tests should be run with volatile BucketList as it's + // faster. You can change this by enabling the appropriate line below + // mode = Config::TESTDB_IN_MEMORY; + // mode = Config::TESTDB_BUCKET_DB_PERSISTENT; // mode = Config::TESTDB_POSTGRESQL; mode = Config::TESTDB_BUCKET_DB_VOLATILE; } @@ -283,11 +283,11 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode) switch (mode) { case Config::TESTDB_BUCKET_DB_VOLATILE: - case Config::TESTDB_IN_MEMORY_OFFERS: + case Config::TESTDB_IN_MEMORY: dbname << "sqlite3://:memory:"; + thisConfig.BACKGROUND_EVICTION_SCAN = false; break; case Config::TESTDB_BUCKET_DB_PERSISTENT: - case Config::TESTDB_ON_DISK_SQLITE: dbname << "sqlite3://" << rootDir << "test.db"; thisConfig.DISABLE_XDR_FSYNC = false; break; @@ -295,31 +295,19 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode) case Config::TESTDB_POSTGRESQL: dbname << "postgresql://dbname=test" << instanceNumber; thisConfig.DISABLE_XDR_FSYNC = false; - break; - case Config::TESTDB_IN_MEMORY_NO_OFFERS: - thisConfig.MODE_USES_IN_MEMORY_LEDGER = true; + thisConfig.BACKGROUND_EVICTION_SCAN = false; break; #endif default: abort(); } - if (mode == Config::TESTDB_BUCKET_DB_VOLATILE || - mode == Config::TESTDB_BUCKET_DB_PERSISTENT) + if (mode == Config::TESTDB_IN_MEMORY) { - thisConfig.DEPRECATED_SQL_LEDGER_STATE = false; - thisConfig.BACKGROUND_EVICTION_SCAN = true; - } - else - { - thisConfig.DEPRECATED_SQL_LEDGER_STATE = true; - thisConfig.BACKGROUND_EVICTION_SCAN = false; + 
thisConfig.MODE_USES_IN_MEMORY_LEDGER = true; } - if (mode != Config::TESTDB_IN_MEMORY_NO_OFFERS) - { - thisConfig.DATABASE = SecretValue{dbname.str()}; - } + thisConfig.DATABASE = SecretValue{dbname.str()}; thisConfig.REPORT_METRICS = gTestMetrics; // disable maintenance diff --git a/src/transactions/test/AllowTrustTests.cpp b/src/transactions/test/AllowTrustTests.cpp index 398bee5e28..43c25f3824 100644 --- a/src/transactions/test/AllowTrustTests.cpp +++ b/src/transactions/test/AllowTrustTests.cpp @@ -82,7 +82,7 @@ template struct TestStub TrustFlagOp flagOp = V == 0 ? TrustFlagOp::ALLOW_TRUST : TrustFlagOp::SET_TRUST_LINE_FLAGS; - auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); @@ -377,7 +377,7 @@ template struct TestStub TrustFlagOp flagOp = V == 0 ? TrustFlagOp::ALLOW_TRUST : TrustFlagOp::SET_TRUST_LINE_FLAGS; - auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/BumpSequenceTests.cpp b/src/transactions/test/BumpSequenceTests.cpp index f8a43d42ca..9a09b171f2 100644 --- a/src/transactions/test/BumpSequenceTests.cpp +++ b/src/transactions/test/BumpSequenceTests.cpp @@ -25,7 +25,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("bump sequence", "[tx][bumpsequence]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ChangeTrustTests.cpp b/src/transactions/test/ChangeTrustTests.cpp index 72edb46160..3c62142f2e 100644 --- a/src/transactions/test/ChangeTrustTests.cpp +++ b/src/transactions/test/ChangeTrustTests.cpp @@ -23,7 +23,7 @@ 
using namespace stellar::txtest; TEST_CASE_VERSIONS("change trust", "[tx][changetrust]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); @@ -304,7 +304,7 @@ TEST_CASE_VERSIONS("change trust", "[tx][changetrust]") TEST_CASE_VERSIONS("change trust pool share trustline", "[tx][changetrust][liquiditypool]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ClaimableBalanceTests.cpp b/src/transactions/test/ClaimableBalanceTests.cpp index b3d8980bb2..546eee4392 100644 --- a/src/transactions/test/ClaimableBalanceTests.cpp +++ b/src/transactions/test/ClaimableBalanceTests.cpp @@ -298,7 +298,7 @@ validateBalancesOnCreateAndClaim(TestAccount& createAcc, TestAccount& claimAcc, TEST_CASE_VERSIONS("claimableBalance", "[tx][claimablebalance]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ClawbackClaimableBalanceTests.cpp b/src/transactions/test/ClawbackClaimableBalanceTests.cpp index c9f9e1dcc8..b11051995a 100644 --- a/src/transactions/test/ClawbackClaimableBalanceTests.cpp +++ b/src/transactions/test/ClawbackClaimableBalanceTests.cpp @@ -19,7 +19,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("clawbackClaimableBalance", "[tx][clawback][claimablebalance]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ClawbackTests.cpp 
b/src/transactions/test/ClawbackTests.cpp index eee797d441..f0238c35f1 100644 --- a/src/transactions/test/ClawbackTests.cpp +++ b/src/transactions/test/ClawbackTests.cpp @@ -17,7 +17,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("clawback", "[tx][clawback]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/CreateAccountTests.cpp b/src/transactions/test/CreateAccountTests.cpp index ece78844dd..66cda7978e 100644 --- a/src/transactions/test/CreateAccountTests.cpp +++ b/src/transactions/test/CreateAccountTests.cpp @@ -31,7 +31,7 @@ TEST_CASE_VERSIONS("create account", "[tx][createaccount]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); // set up world auto root = TestAccount::createRoot(*app); diff --git a/src/transactions/test/EndSponsoringFutureReservesTests.cpp b/src/transactions/test/EndSponsoringFutureReservesTests.cpp index f321b37b93..b163d3bfd8 100644 --- a/src/transactions/test/EndSponsoringFutureReservesTests.cpp +++ b/src/transactions/test/EndSponsoringFutureReservesTests.cpp @@ -34,7 +34,7 @@ TEST_CASE_VERSIONS("confirm and clear sponsor", "[tx][sponsorship]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); auto root = TestAccount::createRoot(*app); int64_t minBalance = app->getLedgerManager().getLastMinBalance(0); diff --git a/src/transactions/test/FeeBumpTransactionTests.cpp b/src/transactions/test/FeeBumpTransactionTests.cpp index 573666e942..a020e6c058 100644 --- a/src/transactions/test/FeeBumpTransactionTests.cpp +++ b/src/transactions/test/FeeBumpTransactionTests.cpp @@ -66,7 +66,7 @@ 
TEST_CASE_VERSIONS("fee bump transactions", "[tx][feebump]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); auto& lm = app->getLedgerManager(); auto fee = lm.getLastClosedLedgerHeader().header.baseFee; diff --git a/src/transactions/test/InflationTests.cpp b/src/transactions/test/InflationTests.cpp index dbf2d8feef..f5cc0697f0 100644 --- a/src/transactions/test/InflationTests.cpp +++ b/src/transactions/test/InflationTests.cpp @@ -432,7 +432,7 @@ TEST_CASE_VERSIONS("inflation total coins", "[tx][inflation]") TEST_CASE_VERSIONS("inflation", "[tx][inflation]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock::system_time_point inflationStart; // inflation starts on 1-jul-2014 diff --git a/src/transactions/test/InvokeHostFunctionTests.cpp b/src/transactions/test/InvokeHostFunctionTests.cpp index 441c22fba1..c0c980819f 100644 --- a/src/transactions/test/InvokeHostFunctionTests.cpp +++ b/src/transactions/test/InvokeHostFunctionTests.cpp @@ -2845,7 +2845,7 @@ TEST_CASE("state archival operation errors", "[tx][soroban]") TEST_CASE("settings upgrade command line utils", "[tx][soroban][upgrades]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.ENABLE_SOROBAN_DIAGNOSTIC_EVENTS = true; auto app = createTestApplication(clock, cfg); auto root = TestAccount::createRoot(*app); diff --git a/src/transactions/test/LiquidityPoolDepositTests.cpp b/src/transactions/test/LiquidityPoolDepositTests.cpp index 2bf6cd413a..1b8b899eaf 100644 --- a/src/transactions/test/LiquidityPoolDepositTests.cpp +++ b/src/transactions/test/LiquidityPoolDepositTests.cpp @@ -18,7 +18,7 @@ TEST_CASE_VERSIONS("liquidity pool deposit", "[tx][liquiditypool]") { VirtualClock clock; auto app = 
createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); // set up world auto const& lm = app->getLedgerManager(); diff --git a/src/transactions/test/LiquidityPoolTradeTests.cpp b/src/transactions/test/LiquidityPoolTradeTests.cpp index 9cddacf59b..12b0ab3779 100644 --- a/src/transactions/test/LiquidityPoolTradeTests.cpp +++ b/src/transactions/test/LiquidityPoolTradeTests.cpp @@ -983,7 +983,7 @@ TEST_CASE_VERSIONS("liquidity pool trade", "[tx][liquiditypool]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); // set up world auto minBal = [&](int32_t n) { diff --git a/src/transactions/test/LiquidityPoolWithdrawTests.cpp b/src/transactions/test/LiquidityPoolWithdrawTests.cpp index a6cb9b6c77..df3acf8b3d 100644 --- a/src/transactions/test/LiquidityPoolWithdrawTests.cpp +++ b/src/transactions/test/LiquidityPoolWithdrawTests.cpp @@ -17,7 +17,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("liquidity pool withdraw", "[tx][liquiditypool]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ManageBuyOfferTests.cpp b/src/transactions/test/ManageBuyOfferTests.cpp index 49e836fc8a..e280a13b62 100644 --- a/src/transactions/test/ManageBuyOfferTests.cpp +++ b/src/transactions/test/ManageBuyOfferTests.cpp @@ -47,7 +47,7 @@ TEST_CASE_VERSIONS("manage buy offer failure modes", "[tx][offers]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -354,7 +354,7 @@ 
TEST_CASE_VERSIONS("manage buy offer liabilities", "[tx][offers]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); auto checkLiabilities = [&](std::string const& section, int64_t buyAmount, Price const& price, int64_t expectedBuying, @@ -438,7 +438,7 @@ TEST_CASE_VERSIONS("manage buy offer exactly crosses existing offers", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -491,7 +491,7 @@ TEST_CASE_VERSIONS( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -619,7 +619,7 @@ TEST_CASE_VERSIONS( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -774,7 +774,7 @@ TEST_CASE_VERSIONS( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -927,7 +927,7 @@ TEST_CASE_VERSIONS("manage buy offer with zero liabilities", "[tx][offers]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const 
minBalancePlusFees = @@ -983,7 +983,7 @@ TEST_CASE_VERSIONS("manage buy offer releases liabilities before modify", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = diff --git a/src/transactions/test/ManageDataTests.cpp b/src/transactions/test/ManageDataTests.cpp index d1b5dbcfe4..770ba6f2e5 100644 --- a/src/transactions/test/ManageDataTests.cpp +++ b/src/transactions/test/ManageDataTests.cpp @@ -26,7 +26,7 @@ using namespace stellar::txtest; // add too much data TEST_CASE_VERSIONS("manage data", "[tx][managedata]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/MergeTests.cpp b/src/transactions/test/MergeTests.cpp index c183024d59..ecbd55dd0b 100644 --- a/src/transactions/test/MergeTests.cpp +++ b/src/transactions/test/MergeTests.cpp @@ -34,7 +34,7 @@ using namespace stellar::txtest; // Merge when you have outstanding data entries TEST_CASE_VERSIONS("merge", "[tx][merge]") { - Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY)); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/OfferTests.cpp b/src/transactions/test/OfferTests.cpp index 65b1257716..b63e939c25 100644 --- a/src/transactions/test/OfferTests.cpp +++ b/src/transactions/test/OfferTests.cpp @@ -36,7 +36,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("create offer", "[tx][offers]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = 
createTestApplication(clock, cfg); diff --git a/src/transactions/test/PathPaymentStrictSendTests.cpp b/src/transactions/test/PathPaymentStrictSendTests.cpp index 6eb6a153a5..21fb6c48f1 100644 --- a/src/transactions/test/PathPaymentStrictSendTests.cpp +++ b/src/transactions/test/PathPaymentStrictSendTests.cpp @@ -178,7 +178,7 @@ TEST_CASE_VERSIONS("pathpayment strict send", "[tx][pathpayment]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); auto exchanged = [&](TestMarketOffer const& o, int64_t sold, int64_t bought) { @@ -2406,7 +2406,7 @@ TEST_CASE_VERSIONS("pathpayment strict send", "[tx][pathpayment]") TEST_CASE_VERSIONS("pathpayment strict send uses all offers in a loop", "[tx][pathpayment]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/PathPaymentTests.cpp b/src/transactions/test/PathPaymentTests.cpp index a36a8b460e..71f4fb5ccf 100644 --- a/src/transactions/test/PathPaymentTests.cpp +++ b/src/transactions/test/PathPaymentTests.cpp @@ -70,7 +70,7 @@ assetPathToString(const std::deque& assets) TEST_CASE_VERSIONS("pathpayment", "[tx][pathpayment]") { - auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/PaymentTests.cpp b/src/transactions/test/PaymentTests.cpp index e53faded26..d7bbf0807b 100644 --- a/src/transactions/test/PaymentTests.cpp +++ b/src/transactions/test/PaymentTests.cpp @@ -38,7 +38,7 @@ using namespace stellar::txtest; // path payment with a transfer rate TEST_CASE_VERSIONS("payment", "[tx][payment]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); 
+ Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); @@ -1510,7 +1510,11 @@ TEST_CASE_VERSIONS("payment", "[tx][payment]") // Since a1 has a trustline, and there is only 1 trustline, we know // that gateway has no trustlines. - REQUIRE(app->getLedgerTxnRoot().countObjects(TRUSTLINE) == 1); + LedgerSnapshot lsg(*app); + LedgerKey trustKey(TRUSTLINE); + trustKey.trustLine().accountID = gateway.getPublicKey(); + trustKey.trustLine().asset = assetToTrustLineAsset(idr); + REQUIRE(!lsg.load(trustKey)); }); } SECTION("authorize flag") @@ -1930,7 +1934,7 @@ TEST_CASE_VERSIONS("payment fees", "[tx][payment]") SECTION("fee equal to base reserve") { - auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY); cfg.TESTING_UPGRADE_DESIRED_FEE = 100000000; VirtualClock clock; @@ -2040,7 +2044,7 @@ TEST_CASE_VERSIONS("payment fees", "[tx][payment]") SECTION("fee bigger than base reserve") { - auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY); cfg.TESTING_UPGRADE_DESIRED_FEE = 200000000; VirtualClock clock; diff --git a/src/transactions/test/RevokeSponsorshipTests.cpp b/src/transactions/test/RevokeSponsorshipTests.cpp index e88154e458..01fa18cb31 100644 --- a/src/transactions/test/RevokeSponsorshipTests.cpp +++ b/src/transactions/test/RevokeSponsorshipTests.cpp @@ -40,7 +40,7 @@ TEST_CASE_VERSIONS("update sponsorship", "[tx][sponsorship]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); auto minBal = [&](uint32_t n) { return app->getLedgerManager().getLastMinBalance(n); diff --git a/src/transactions/test/SetOptionsTests.cpp b/src/transactions/test/SetOptionsTests.cpp index d5a5d522ec..fedb026763 100644 --- a/src/transactions/test/SetOptionsTests.cpp +++ 
b/src/transactions/test/SetOptionsTests.cpp @@ -36,7 +36,7 @@ using namespace stellar::txtest; // minbalance TEST_CASE_VERSIONS("set options", "[tx][setoptions]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/SetTrustLineFlagsTests.cpp b/src/transactions/test/SetTrustLineFlagsTests.cpp index d030c62f7e..05b13ac7d4 100644 --- a/src/transactions/test/SetTrustLineFlagsTests.cpp +++ b/src/transactions/test/SetTrustLineFlagsTests.cpp @@ -105,7 +105,7 @@ getNumOffers(Application& app, TestAccount const& account, Asset const& asset) TEST_CASE_VERSIONS("set trustline flags", "[tx][settrustlineflags]") { - auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); @@ -380,7 +380,7 @@ TEST_CASE_VERSIONS("revoke from pool", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); // set up world auto root = TestAccount::createRoot(*app); diff --git a/src/transactions/test/TxEnvelopeTests.cpp b/src/transactions/test/TxEnvelopeTests.cpp index b60f3038ea..8f06cfc64c 100644 --- a/src/transactions/test/TxEnvelopeTests.cpp +++ b/src/transactions/test/TxEnvelopeTests.cpp @@ -86,7 +86,7 @@ TEST_CASE("txset - correct apply order", "[tx][envelope]") TEST_CASE_VERSIONS("txenvelope", "[tx][envelope]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); From 20f5feaede1228b0fdb47df02513a380b4d5344c Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Sun, 1 Dec 2024 11:13:04 -0800 Subject: [PATCH 
04/17] in-memory mode rewritten for testing only --- docs/integration.md | 4 +- src/bucket/BucketApplicator.cpp | 2 + src/bucket/test/BucketTests.cpp | 76 ------------- src/herder/test/HerderTests.cpp | 106 +----------------- src/ledger/LedgerManager.h | 7 +- src/ledger/LedgerManagerImpl.cpp | 80 ++++++------- src/ledger/LedgerManagerImpl.h | 3 +- src/ledger/{ => test}/InMemoryLedgerTxn.cpp | 75 +++++-------- src/ledger/{ => test}/InMemoryLedgerTxn.h | 45 ++++++-- .../{ => test}/InMemoryLedgerTxnRoot.cpp | 2 +- src/ledger/{ => test}/InMemoryLedgerTxnRoot.h | 0 src/main/ApplicationImpl.cpp | 12 +- src/main/ApplicationImpl.h | 9 +- src/main/ApplicationUtils.cpp | 29 +---- src/main/Config.cpp | 7 +- src/main/Config.h | 4 +- src/main/test/ApplicationUtilsTests.cpp | 62 ---------- src/main/test/ExternalQueueTests.cpp | 46 ++++++++ src/simulation/Simulation.cpp | 17 +-- src/simulation/Simulation.h | 14 +++ src/test/TestUtils.cpp | 34 +++--- src/test/TestUtils.h | 5 + 22 files changed, 222 insertions(+), 417 deletions(-) rename src/ledger/{ => test}/InMemoryLedgerTxn.cpp (88%) rename src/ledger/{ => test}/InMemoryLedgerTxn.h (69%) rename src/ledger/{ => test}/InMemoryLedgerTxnRoot.cpp (98%) rename src/ledger/{ => test}/InMemoryLedgerTxnRoot.h (100%) create mode 100644 src/main/test/ExternalQueueTests.cpp diff --git a/docs/integration.md b/docs/integration.md index deb6147fc3..656479f4b3 100644 --- a/docs/integration.md +++ b/docs/integration.md @@ -17,9 +17,9 @@ stellar-core generates several types of data that can be used by applications, d ## Ledger State -Full [Ledger](ledger.md) snapshots are available in both: +Full [Ledger](ledger.md) snapshots are available via both: * [history archives](history.md) (checkpoints, every 64 ledgers, updated every 5 minutes) -* in the case of captive-core, the ledger is maintained within the stellar-core process and ledger-state need to be tracked as it changes via "meta" updates. 
+* a stellar-core instance, where the ledger is maintained within the stellar-core process and ledger-state need to be tracked as it changes via "meta" updates. ## Ledger State transition information (transactions, etc) diff --git a/src/bucket/BucketApplicator.cpp b/src/bucket/BucketApplicator.cpp index f9001d3113..4bcbf213ea 100644 --- a/src/bucket/BucketApplicator.cpp +++ b/src/bucket/BucketApplicator.cpp @@ -110,11 +110,13 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) // directly instead of creating a temporary inner LedgerTxn // as "advance" commits changes during each step this does not introduce any // new failure mode +#ifdef BUILD_TESTS if (mApp.getConfig().MODE_USES_IN_MEMORY_LEDGER) { ltx = static_cast(&root); } else +#endif { innerLtx = std::make_unique(root, false); ltx = innerLtx.get(); diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp index 30d82ff71c..6277f96f83 100644 --- a/src/bucket/test/BucketTests.cpp +++ b/src/bucket/test/BucketTests.cpp @@ -1011,79 +1011,3 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows", } }); } - -TEST_CASE_VERSIONS("legacy bucket apply", "[bucket]") -{ - VirtualClock clock; - Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY)); - for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { - Application::pointer app = createTestApplication(clock, cfg); - - std::vector live(10), noLive; - std::vector dead, noDead; - - for (auto& e : live) - { - e.data.type(ACCOUNT); - auto& a = e.data.account(); - a = LedgerTestUtils::generateValidAccountEntry(5); - a.balance = 1000000000; - dead.emplace_back(LedgerEntryKey(e)); - } - - std::shared_ptr birth = LiveBucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - - std::shared_ptr death = LiveBucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, noLive, dead, - 
/*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - - CLOG_INFO(Bucket, "Applying bucket with {} live entries", live.size()); - birth->apply(*app); - { - auto count = app->getLedgerTxnRoot().countObjects(ACCOUNT); - REQUIRE(count == live.size() + 1 /* root account */); - } - - CLOG_INFO(Bucket, "Applying bucket with {} dead entries", dead.size()); - death->apply(*app); - { - auto count = app->getLedgerTxnRoot().countObjects(ACCOUNT); - REQUIRE(count == 1 /* root account */); - } - }); -} - -TEST_CASE("bucket apply bench", "[bucketbench][!hide]") -{ - auto runtest = [](Config::TestDbMode mode) { - VirtualClock clock; - Config cfg(getTestConfig(0, mode)); - Application::pointer app = createTestApplication(clock, cfg); - - std::vector live(100000); - std::vector noDead; - - for (auto& l : live) - { - l.data.type(ACCOUNT); - auto& a = l.data.account(); - a = LedgerTestUtils::generateValidAccountEntry(5); - } - - std::shared_ptr birth = LiveBucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead, - /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - - CLOG_INFO(Bucket, "Applying bucket with {} live entries", live.size()); - // note: we do not wrap the `apply` call inside a transaction - // as bucket applicator commits to the database incrementally - birth->apply(*app); - }; - - runtest(Config::TESTDB_BUCKET_DB_PERSISTENT); -} diff --git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp index af32283d65..fc145aa906 100644 --- a/src/herder/test/HerderTests.cpp +++ b/src/herder/test/HerderTests.cpp @@ -3247,106 +3247,6 @@ TEST_CASE("soroban txs each parameter surge priced", "[soroban][herder]") } } -TEST_CASE("accept soroban txs after network upgrade", "[soroban][herder]") -{ - auto networkID = sha256(getTestConfig().NETWORK_PASSPHRASE); - - auto simulation = - Topologies::core(4, 1, Simulation::OVER_LOOPBACK, networkID, [](int i) { - auto cfg = getTestConfig(i, 
Config::TESTDB_IN_MEMORY); - cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 100; - cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = - static_cast(SOROBAN_PROTOCOL_VERSION) - 1; - return cfg; - }); - - simulation->startAllNodes(); - auto nodes = simulation->getNodes(); - uint32_t numAccounts = 100; - auto& loadGen = nodes[0]->getLoadGenerator(); - - // Generate some accounts - auto& loadGenDone = - nodes[0]->getMetrics().NewMeter({"loadgen", "run", "complete"}, "run"); - auto currLoadGenCount = loadGenDone.count(); - loadGen.generateLoad( - GeneratedLoadConfig::createAccountsLoad(numAccounts, 1)); - simulation->crankUntil( - [&]() { return loadGenDone.count() > currLoadGenCount; }, - 10 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); - - // Ensure more transactions get in the ledger post upgrade - ConfigUpgradeSetFrameConstPtr res; - Upgrades::UpgradeParameters scheduledUpgrades; - scheduledUpgrades.mUpgradeTime = - VirtualClock::from_time_t(nodes[0] - ->getLedgerManager() - .getLastClosedLedgerHeader() - .header.scpValue.closeTime + - 15); - scheduledUpgrades.mProtocolVersion = - static_cast(SOROBAN_PROTOCOL_VERSION); - for (auto const& app : nodes) - { - app->getHerder().setUpgrades(scheduledUpgrades); - } - - auto& secondLoadGen = nodes[1]->getLoadGenerator(); - auto& secondLoadGenDone = - nodes[1]->getMetrics().NewMeter({"loadgen", "run", "complete"}, "run"); - currLoadGenCount = loadGenDone.count(); - auto secondLoadGenCount = secondLoadGenDone.count(); - - // Generate classic txs from another node (with offset to prevent - // overlapping accounts) - secondLoadGen.generateLoad(GeneratedLoadConfig::txLoad(LoadGenMode::PAY, 50, - /* nTxs */ 100, 2, - /* offset */ 50)); - - // Crank a bit and verify that upgrade went through - simulation->crankForAtLeast(4 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); - REQUIRE(nodes[0] - ->getLedgerManager() - .getLastClosedLedgerHeader() - .header.ledgerVersion == - static_cast(SOROBAN_PROTOCOL_VERSION)); - for (auto node : nodes) - { - 
overrideSorobanNetworkConfigForTest(*node); - } - // Now generate Soroban txs - auto sorobanConfig = - GeneratedLoadConfig::txLoad(LoadGenMode::SOROBAN_UPLOAD, 50, - /* nTxs */ 15, 1, /* offset */ 0); - sorobanConfig.skipLowFeeTxs = true; - loadGen.generateLoad(sorobanConfig); - auto& loadGenFailed = - nodes[0]->getMetrics().NewMeter({"loadgen", "run", "failed"}, "run"); - auto& secondLoadGenFailed = - nodes[1]->getMetrics().NewMeter({"loadgen", "run", "failed"}, "run"); - - simulation->crankUntil( - [&]() { - return loadGenDone.count() > currLoadGenCount && - secondLoadGenDone.count() > secondLoadGenCount; - }, - 200 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); - REQUIRE(loadGenFailed.count() == 0); - REQUIRE(secondLoadGenFailed.count() == 0); - - // Ensure some Soroban txs got into the ledger - auto totalSoroban = - nodes[0] - ->getMetrics() - .NewMeter({"soroban", "host-fn-op", "success"}, "call") - .count() + - nodes[0] - ->getMetrics() - .NewMeter({"soroban", "host-fn-op", "failure"}, "call") - .count(); - REQUIRE(totalSoroban > 0); -} - TEST_CASE("overlay parallel processing") { auto networkID = sha256(getTestConfig().NETWORK_PASSPHRASE); @@ -3679,6 +3579,11 @@ herderExternalizesValuesWithProtocol(uint32_t version) Herder::State::HERDER_BOOTING_STATE); simulation->startAllNodes(); + upgradeSorobanNetworkConfig( + [&](SorobanNetworkConfig& cfg) { + cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1; + }, + simulation); // After SCP is restored, Herder is tracking REQUIRE(getC()->getHerder().getState() == @@ -3758,7 +3663,6 @@ herderExternalizesValuesWithProtocol(uint32_t version) [&](SorobanNetworkConfig& cfg) { cfg.mLedgerMaxTransactionsSizeBytes = 1'000'000; cfg.mTxMaxSizeBytes = 500'000; - cfg.mStateArchivalSettings.bucketListWindowSamplePeriod = 1; }, simulation, /*applyUpgrade=*/false); diff --git a/src/ledger/LedgerManager.h b/src/ledger/LedgerManager.h index acf6e1ee62..e5d160a7d2 100644 --- a/src/ledger/LedgerManager.h +++ 
b/src/ledger/LedgerManager.h @@ -150,13 +150,10 @@ class LedgerManager virtual void startNewLedger() = 0; // loads the last ledger information from the database with the following - // parameters: + // parameter: // * restoreBucketlist indicates whether to restore the bucket list fully, // and restart merges - // * isLedgerStateReady indicates whether the ledger state is ready or is - // still being rebuilt (in which case we can't yet load ledger entries) - virtual void loadLastKnownLedger(bool restoreBucketlist, - bool isLedgerStateReady) = 0; + virtual void loadLastKnownLedger(bool restoreBucketlist) = 0; // Return true if core is currently rebuilding in-memory state via local // catchup diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index 6fa5019a0b..c5c9ac3cbb 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -281,8 +281,7 @@ setLedgerTxnHeader(LedgerHeader const& lh, Application& app) } void -LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist, - bool isLedgerStateReady) +LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist) { ZoneScoped; @@ -348,35 +347,41 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist, releaseAssert(latestLedgerHeader.has_value()); - HistoryArchiveState has = getLastClosedLedgerHAS(); - auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has); - auto pubmissing = - mApp.getHistoryManager().getMissingBucketsReferencedByPublishQueue(); - missing.insert(missing.end(), pubmissing.begin(), pubmissing.end()); - if (!missing.empty()) + // Step 3. 
Restore BucketList if we're doing a full core startup + // (startServices=true), OR when using BucketListDB + if (restoreBucketlist || mApp.getConfig().isUsingBucketListDB()) { - CLOG_ERROR(Ledger, "{} buckets are missing from bucket directory '{}'", - missing.size(), mApp.getBucketManager().getBucketDir()); - throw std::runtime_error("Bucket directory is corrupt"); - } - - if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) - { - // Only restart merges in full startup mode. Many modes in core - // (standalone offline commands, in-memory setup) do not need to - // spin up expensive merge processes. - auto assumeStateWork = - mApp.getWorkScheduler().executeWork( - has, latestLedgerHeader->ledgerVersion, restoreBucketlist); - if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS) + HistoryArchiveState has = getLastClosedLedgerHAS(); + auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has); + auto pubmissing = mApp.getHistoryManager() + .getMissingBucketsReferencedByPublishQueue(); + missing.insert(missing.end(), pubmissing.begin(), pubmissing.end()); + if (!missing.empty()) { - CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}", - ledgerAbbrev(*latestLedgerHeader)); + CLOG_ERROR(Ledger, + "{} buckets are missing from bucket directory '{}'", + missing.size(), mApp.getBucketManager().getBucketDir()); + throw std::runtime_error("Bucket directory is corrupt"); } - else + + if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) { - // Work should only fail during graceful shutdown - releaseAssertOrThrow(mApp.isStopping()); + // Only restart merges in full startup mode. Many modes in core + // (standalone offline commands, in-memory setup) do not need to + // spin up expensive merge processes. 
+ auto assumeStateWork = + mApp.getWorkScheduler().executeWork( + has, latestLedgerHeader->ledgerVersion, restoreBucketlist); + if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS) + { + CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}", + ledgerAbbrev(*latestLedgerHeader)); + } + else + { + // Work should only fail during graceful shutdown + releaseAssertOrThrow(mApp.isStopping()); + } } } @@ -391,23 +396,10 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist, if (protocolVersionStartsFrom(latestLedgerHeader->ledgerVersion, SOROBAN_PROTOCOL_VERSION)) { - if (isLedgerStateReady) - { - // Step 5. If ledger state is ready and core is in v20, load network - // configs right away - LedgerTxn ltx(mApp.getLedgerTxnRoot()); - updateNetworkConfig(ltx); - } - else - { - // In some modes, e.g. in-memory, core's state is rebuilt - // asynchronously via catchup. In this case, we're not able to load - // the network config at this time, and instead must let catchup do - // it when ready. - CLOG_INFO(Ledger, - "Ledger state is being rebuilt, network config will " - "be loaded once the rebuild is done"); - } + // Step 5. 
If ledger state is ready and core is in v20, load network + // configs right away + LedgerTxn ltx(mApp.getLedgerTxnRoot()); + updateNetworkConfig(ltx); } } diff --git a/src/ledger/LedgerManagerImpl.h b/src/ledger/LedgerManagerImpl.h index 61caaf5490..f44dbeb3e8 100644 --- a/src/ledger/LedgerManagerImpl.h +++ b/src/ledger/LedgerManagerImpl.h @@ -173,8 +173,7 @@ class LedgerManagerImpl : public LedgerManager void startNewLedger(LedgerHeader const& genesisLedger); void startNewLedger() override; - void loadLastKnownLedger(bool restoreBucketlist, - bool isLedgerStateReady) override; + void loadLastKnownLedger(bool restoreBucketlist) override; virtual bool rebuildingInMemoryState() override; virtual void setupInMemoryStateRebuild() override; diff --git a/src/ledger/InMemoryLedgerTxn.cpp b/src/ledger/test/InMemoryLedgerTxn.cpp similarity index 88% rename from src/ledger/InMemoryLedgerTxn.cpp rename to src/ledger/test/InMemoryLedgerTxn.cpp index 4c7d47ae83..93a18733d7 100644 --- a/src/ledger/InMemoryLedgerTxn.cpp +++ b/src/ledger/test/InMemoryLedgerTxn.cpp @@ -2,11 +2,9 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "ledger/InMemoryLedgerTxn.h" +#include "ledger/test/InMemoryLedgerTxn.h" #include "ledger/LedgerTxn.h" -#include "ledger/LedgerTxnImpl.h" #include "transactions/TransactionUtils.h" -#include "util/GlobalChecks.h" namespace stellar { @@ -73,7 +71,7 @@ InMemoryLedgerTxn::FilteredEntryIteratorImpl::clone() const InMemoryLedgerTxn::InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db, - AbstractLedgerTxnParent* realRoot) + AbstractLedgerTxnParent& realRoot) : LedgerTxn(parent), mDb(db), mRealRootForOffers(realRoot) { } @@ -144,13 +142,12 @@ InMemoryLedgerTxn::updateLedgerKeyMap(EntryIterator iter) // In addition to maintaining in-memory map, commit offers to "real" ltx // root to test SQL backed offers - if (mRealRootForOffers && - genKey.type() == InternalLedgerEntryType::LEDGER_ENTRY) + if (genKey.type() == InternalLedgerEntryType::LEDGER_ENTRY) { auto const& ledgerKey = genKey.ledgerKey(); if (ledgerKey.type() == OFFER) { - LedgerTxn ltx(*mRealRootForOffers); + LedgerTxn ltx(mRealRootForOffers); if (!iter.entryExists()) { ltx.erase(ledgerKey); @@ -365,79 +362,61 @@ InMemoryLedgerTxn::getPoolShareTrustLinesByAccountAndAsset( void InMemoryLedgerTxn::dropOffers(bool rebuild) { - if (mRealRootForOffers) - { - mRealRootForOffers->dropOffers(rebuild); - } - else - { - LedgerTxn::dropOffers(rebuild); - } + mRealRootForOffers.dropOffers(rebuild); } uint64_t InMemoryLedgerTxn::countObjects(LedgerEntryType let) const { - if (mRealRootForOffers) - { - return mRealRootForOffers->countObjects(let); - } - - return 0; + return mRealRootForOffers.countObjects(let); } uint64_t InMemoryLedgerTxn::countObjects(LedgerEntryType let, LedgerRange const& ledgers) const { - if (mRealRootForOffers) - { - return mRealRootForOffers->countObjects(let, ledgers); - } - - return 0; + return mRealRootForOffers.countObjects(let, ledgers); } void 
InMemoryLedgerTxn::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const { - if (mRealRootForOffers) - { - mRealRootForOffers->deleteObjectsModifiedOnOrAfterLedger(ledger); - } + mRealRootForOffers.deleteObjectsModifiedOnOrAfterLedger(ledger); } UnorderedMap InMemoryLedgerTxn::getAllOffers() { - if (mRealRootForOffers) - { - return mRealRootForOffers->getAllOffers(); - } - - return LedgerTxn::getAllOffers(); + return mRealRootForOffers.getAllOffers(); } std::shared_ptr InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling) { - if (mRealRootForOffers) - { - return mRealRootForOffers->getBestOffer(buying, selling); - } - - return LedgerTxn::getBestOffer(buying, selling); + return mRealRootForOffers.getBestOffer(buying, selling); } std::shared_ptr InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling, OfferDescriptor const& worseThan) { - if (mRealRootForOffers) - { - return mRealRootForOffers->getBestOffer(buying, selling, worseThan); - } + return mRealRootForOffers.getBestOffer(buying, selling, worseThan); +} - return LedgerTxn::getBestOffer(buying, selling, worseThan); +#ifdef BEST_OFFER_DEBUGGING +bool +InMemoryLedgerTxn::bestOfferDebuggingEnabled() const +{ + return mRealRootForOffers.bestOfferDebuggingEnabled(); +} + +std::shared_ptr +InMemoryLedgerTxn::getBestOfferSlow(Asset const& buying, Asset const& selling, + OfferDescriptor const* worseThan, + std::unordered_set& exclude) +{ + return mRealRootForOffers.getBestOfferSlow(buying, selling, worseThan, + exclude); } +#endif } diff --git a/src/ledger/InMemoryLedgerTxn.h b/src/ledger/test/InMemoryLedgerTxn.h similarity index 69% rename from src/ledger/InMemoryLedgerTxn.h rename to src/ledger/test/InMemoryLedgerTxn.h index 38917186cb..f7c754284f 100644 --- a/src/ledger/InMemoryLedgerTxn.h +++ b/src/ledger/test/InMemoryLedgerTxn.h @@ -5,14 +5,22 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "database/Database.h" -#include 
"ledger/InMemoryLedgerTxnRoot.h" #include "ledger/LedgerTxn.h" +#include "ledger/test/InMemoryLedgerTxnRoot.h" // This is a (very small) extension of LedgerTxn to help implement in-memory -// mode. In-memory mode only holds the _ledger_ contents in memory; it still has -// a "small" SQL database storing some additional tables, and we still want to -// have transactional atomicity on those tables in regions of code we have a -// LedgerTxn open. So that's the _purpose_. +// mode. Originally this want intended for production use, but is now deprecated +// and only used for a few tests. +// +// In-memory mode holds the _ledger_ contents in memory, allowing tests to +// directly change ledger state without actually committing a ledger. These +// direct changes are incompatible with BucketListDB, as the data structure is +// baked into consensus and arbitrary changes without closing ledgers makes the +// state machine _very_ unhappy. While we're slowly transitioning to tests that +// don't directly commit changes and bypass ledger close, we still have a number +// of older tests that have this assumption baked in. While it would be nice to +// deprecate this mode entirely, it's a significant undertaking: +// https://github.com/stellar/stellar-core/issues/4570. // // On to messy implementation details: in-memory mode is implemented by // replacing the normal LedgerTxnRoot with a stub class InMemoryLedgerTxnRoot @@ -32,9 +40,16 @@ // has no soci::transaction | has soci::transaction // // -// In other words, in-memory mode _moves_ the soci::transaction from the root +// In other words, in-memory mode _copies_ the soci::transaction from the root // to its first (never-closing) child, and commits to the DB when children // of that first never-closing child commit to it. +// +// Additionally, InMemoryLedgerTxn (not InMemoryLedgerTxnRoot) maintains a +// reference to the "real" LedgerTxnRoot that has an soci::transaction. 
Any +// offer related queries and writes are ignored by InMemoryLedgerTxn and passed +// through to this real, SQL backed root in order to test offer SQL queries. +// Unlike all other ledger entry types, offers are stored in SQL, which has no +// problem with arbitrary writes (unlike the BucketList). namespace stellar { @@ -46,9 +61,10 @@ class InMemoryLedgerTxn : public LedgerTxn // For some tests, we need to bypass ledger close and commit directly to the // in-memory ltx. However, we still want to test SQL backed offers. The - // "never" committing root sets this flag to true such that offer-related - // calls get based to the real SQL backed root - AbstractLedgerTxnParent* const mRealRootForOffers; + // "never" committing in-memory root maintains a reference to the real, SQL + // backed LedgerTxnRoot. All offer related queries and writes are forwarded + // to the real root in order to test offer SQL queries. + AbstractLedgerTxnParent& mRealRootForOffers; UnorderedMap> mOffersAndPoolShareTrustlineKeys; @@ -82,7 +98,7 @@ class InMemoryLedgerTxn : public LedgerTxn public: InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db, - AbstractLedgerTxnParent* realRoot = nullptr); + AbstractLedgerTxnParent& realRoot); virtual ~InMemoryLedgerTxn(); void addChild(AbstractLedgerTxn& child, TransactionMode mode) override; @@ -124,6 +140,15 @@ class InMemoryLedgerTxn : public LedgerTxn LedgerRange const& ledgers) const override; void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; + +#ifdef BEST_OFFER_DEBUGGING + virtual bool bestOfferDebuggingEnabled() const override; + + virtual std::shared_ptr + getBestOfferSlow(Asset const& buying, Asset const& selling, + OfferDescriptor const* worseThan, + std::unordered_set& exclude) override; +#endif }; } diff --git a/src/ledger/InMemoryLedgerTxnRoot.cpp b/src/ledger/test/InMemoryLedgerTxnRoot.cpp similarity index 98% rename from src/ledger/InMemoryLedgerTxnRoot.cpp rename to 
src/ledger/test/InMemoryLedgerTxnRoot.cpp index 386ceb2e93..7da4f37e1c 100644 --- a/src/ledger/InMemoryLedgerTxnRoot.cpp +++ b/src/ledger/test/InMemoryLedgerTxnRoot.cpp @@ -2,7 +2,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "ledger/InMemoryLedgerTxnRoot.h" +#include "ledger/test/InMemoryLedgerTxnRoot.h" #include "ledger/LedgerRange.h" #include "ledger/LedgerTxn.h" #include "util/XDROperators.h" diff --git a/src/ledger/InMemoryLedgerTxnRoot.h b/src/ledger/test/InMemoryLedgerTxnRoot.h similarity index 100% rename from src/ledger/InMemoryLedgerTxnRoot.h rename to src/ledger/test/InMemoryLedgerTxnRoot.h diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp index d5f8f7208b..5be20c7342 100644 --- a/src/main/ApplicationImpl.cpp +++ b/src/main/ApplicationImpl.cpp @@ -32,8 +32,6 @@ #include "invariant/LedgerEntryIsValid.h" #include "invariant/LiabilitiesMatchOffers.h" #include "invariant/SponsorshipCountIsValid.h" -#include "ledger/InMemoryLedgerTxn.h" -#include "ledger/InMemoryLedgerTxnRoot.h" #include "ledger/LedgerHeaderUtils.h" #include "ledger/LedgerManager.h" #include "ledger/LedgerTxn.h" @@ -62,6 +60,8 @@ #include "work/WorkScheduler.h" #ifdef BUILD_TESTS +#include "ledger/test/InMemoryLedgerTxn.h" +#include "ledger/test/InMemoryLedgerTxnRoot.h" #include "simulation/LoadGenerator.h" #endif @@ -400,7 +400,7 @@ ApplicationImpl::resetLedgerState() #endif ); mNeverCommittingLedgerTxn = std::make_unique( - *mInMemoryLedgerTxnRoot, getDatabase(), mLedgerTxnRoot.get()); + *mInMemoryLedgerTxnRoot, getDatabase(), *mLedgerTxnRoot); } else #endif @@ -598,8 +598,7 @@ ApplicationImpl::getJsonInfo(bool verbose) void ApplicationImpl::reportInfo(bool verbose) { - mLedgerManager->loadLastKnownLedger(/* restoreBucketlist */ false, - /* isLedgerStateReady */ true); + mLedgerManager->loadLastKnownLedger(/* restoreBucketlist */ false); 
LOG_INFO(DEFAULT_LOG, "Reporting application info"); std::cout << getJsonInfo(verbose).toStyledString() << std::endl; } @@ -918,8 +917,7 @@ ApplicationImpl::start() CLOG_INFO(Ledger, "Starting up application"); mStarted = true; - mLedgerManager->loadLastKnownLedger(/* restoreBucketlist */ true, - /* isLedgerStateReady */ true); + mLedgerManager->loadLastKnownLedger(/* restoreBucketlist */ true); startServices(); } diff --git a/src/main/ApplicationImpl.h b/src/main/ApplicationImpl.h index e1c0afd73d..1fc7ea989c 100644 --- a/src/main/ApplicationImpl.h +++ b/src/main/ApplicationImpl.h @@ -188,10 +188,11 @@ class ApplicationImpl : public Application // the "effective" in-memory root transaction, is returned when a client // requests the root. // - // Note that using this only works when the ledger can fit in RAM -- as it - // is held in the never-committing LedgerTxn in its entirety -- so if it - // ever grows beyond RAM-size you need to use a mode with some sort of - // database on secondary storage. + // This is only used in testing scenarios where we need to commit directly + // to the LedgerTxn root and bypass the normal ledger close process (since + // BucketListDB requires a full ledger close to update DB state). In the + // future, this should be removed in favor of tests that are all compatible + // with BucketListDB: https://github.com/stellar/stellar-core/issues/4570. 
#ifdef BUILD_TESTS std::unique_ptr mInMemoryLedgerTxnRoot; std::unique_ptr mNeverCommittingLedgerTxn; diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index bbc14086a6..2d9d811b53 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -125,23 +125,6 @@ setupApp(Config& cfg, VirtualClock& clock) return nullptr; } - // With in-memory testing mode, ledger state is not yet ready during this - // setup step - app->getLedgerManager().loadLastKnownLedger( - /* restoreBucketlist */ false, - /* isLedgerStateReady */ !cfg.MODE_USES_IN_MEMORY_LEDGER); - auto lcl = app->getLedgerManager().getLastClosedLedgerHeader(); - - if (cfg.MODE_USES_IN_MEMORY_LEDGER && - lcl.header.ledgerSeq == LedgerManager::GENESIS_LEDGER_SEQ) - { - // If ledger is genesis, rebuild genesis state from buckets - if (!applyBucketsForLCL(*app)) - { - return nullptr; - } - } - return app; } @@ -320,8 +303,7 @@ selfCheck(Config cfg) // We run self-checks from a "loaded but dormant" state where the // application is not started, but the LM has loaded the LCL. - app->getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false, - /* isLedgerStateReady */ true); + app->getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false); // First we schedule the cheap, asynchronous "online" checks that get run by // the HTTP "self-check" endpoint, and crank until they're done. 
@@ -402,8 +384,7 @@ mergeBucketList(Config cfg, std::string const& outputDir) auto& lm = app->getLedgerManager(); auto& bm = app->getBucketManager(); - lm.loadLastKnownLedger(/* restoreBucketlist */ false, - /* isLedgerStateReady */ true); + lm.loadLastKnownLedger(/* restoreBucketlist */ false); HistoryArchiveState has = lm.getLastClosedLedgerHAS(); auto bucket = bm.mergeBuckets(has); @@ -506,8 +487,7 @@ dumpStateArchivalStatistics(Config cfg) VirtualClock clock; cfg.setNoListen(); Application::pointer app = Application::create(clock, cfg, false); - app->getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false, - /* isLedgerStateReady */ true); + app->getLedgerManager().loadLastKnownLedger(/* restoreBucketlist */ false); auto& lm = app->getLedgerManager(); auto& bm = app->getBucketManager(); HistoryArchiveState has = lm.getLastClosedLedgerHAS(); @@ -620,8 +600,7 @@ dumpLedger(Config cfg, std::string const& outputFile, Application::pointer app = Application::create(clock, cfg, false); auto& lm = app->getLedgerManager(); - lm.loadLastKnownLedger(/* restoreBucketlist */ false, - /* isLedgerStateReady */ true); + lm.loadLastKnownLedger(/* restoreBucketlist */ false); HistoryArchiveState has = lm.getLastClosedLedgerHAS(); std::optional minLedger; if (lastModifiedLedgerCount) diff --git a/src/main/Config.cpp b/src/main/Config.cpp index c7e50a13a8..25ba211fb1 100644 --- a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -2275,8 +2275,11 @@ Config::modeDoesCatchupWithBucketList() const bool Config::isUsingBucketListDB() const { - return !DEPRECATED_SQL_LEDGER_STATE && !MODE_USES_IN_MEMORY_LEDGER && - MODE_ENABLES_BUCKETLIST; + return !DEPRECATED_SQL_LEDGER_STATE +#ifdef BUILD_TESTS + && !MODE_USES_IN_MEMORY_LEDGER +#endif + && MODE_ENABLES_BUCKETLIST; } bool diff --git a/src/main/Config.h b/src/main/Config.h index e8ab848765..3b79fdeccc 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -693,7 +693,9 @@ class Config : public std::enable_shared_from_this 
bool TEST_CASES_ENABLED; // A config parameter that uses a never-committing ledger. This means that - // all ledger entries will be kept in memory, and not persisted to DB. + // all ledger entries, except for offers, will be kept in memory, and not + // persisted to DB. Since offers are backed by SQL and not BucketListDB, + // offers are still committed to the SQL DB even when this mode is enabled. // Should only be used for testing. bool MODE_USES_IN_MEMORY_LEDGER; diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp index a7128d4d87..3e56609e37 100644 --- a/src/main/test/ApplicationUtilsTests.cpp +++ b/src/main/test/ApplicationUtilsTests.cpp @@ -2,26 +2,20 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -#include "bucket/test/BucketTestUtils.h" -#include "crypto/Random.h" #include "history/HistoryArchiveManager.h" #include "history/HistoryManagerImpl.h" #include "history/test/HistoryTestsUtils.h" #include "invariant/BucketListIsConsistentWithDatabase.h" #include "ledger/LedgerTxn.h" -#include "ledger/test/LedgerTestUtils.h" #include "lib/catch.hpp" #include "main/Application.h" #include "main/ApplicationUtils.h" #include "main/CommandHandler.h" #include "main/Config.h" -#include "overlay/OverlayManager.h" #include "simulation/Simulation.h" #include "test/TestUtils.h" -#include "test/TxTests.h" #include "test/test.h" #include "transactions/TransactionUtils.h" -#include "util/Logging.h" #include #include @@ -54,54 +48,6 @@ class TemporaryFileDamager } }; -// Logic to check the state of the bucket list with the state of the DB -static bool -checkState(Application& app) -{ - BucketListIsConsistentWithDatabase blc(app); - bool blcOk = true; - try - { - blc.checkEntireBucketlist(); - } - catch (std::runtime_error& e) - { - LOG_ERROR(DEFAULT_LOG, "Error during bucket-list consistency check: {}", - e.what()); - blcOk = false; - }
- - if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER) - { - auto checkBucket = [&blcOk](auto b) { - if (!b->isEmpty() && !b->isIndexed()) - { - LOG_ERROR(DEFAULT_LOG, - "Error during bucket-list consistency check: " - "unindexed bucket in BucketList"); - blcOk = false; - } - }; - - auto& bm = app.getBucketManager(); - for (uint32_t i = 0; i < bm.getLiveBucketList().kNumLevels && blcOk; - ++i) - { - auto& level = bm.getLiveBucketList().getLevel(i); - checkBucket(level.getCurr()); - checkBucket(level.getSnap()); - auto& nextFuture = level.getNext(); - if (nextFuture.hasOutputHash()) - { - auto hash = hexToBin256(nextFuture.getOutputHash()); - checkBucket(bm.getBucketByHash(hash)); - } - } - } - - return blcOk; -} - // Sets up a network with a main validator node that publishes checkpoints to // a test node. Tests startup behavior of the test node when up to date with // validator and out of sync. @@ -377,14 +323,6 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]") } } -TEST_CASE("application setup", "[applicationutils]") -{ - VirtualClock clock; - auto cfg = getTestConfig(); - auto app = setupApp(cfg, clock); - REQUIRE(checkState(*app)); -} - TEST_CASE("application major version numbers", "[applicationutils]") { CHECK(getStellarCoreMajorReleaseVersion("v19.0.0") == diff --git a/src/main/test/ExternalQueueTests.cpp b/src/main/test/ExternalQueueTests.cpp new file mode 100644 index 0000000000..e5af50427f --- /dev/null +++ b/src/main/test/ExternalQueueTests.cpp @@ -0,0 +1,46 @@ +#ifdef USE_POSTGRES +// Copyright 2014 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +#include "lib/catch.hpp" +#include "main/Application.h" +#include "main/CommandHandler.h" +#include "main/Config.h" +#include "main/ExternalQueue.h" +#include "simulation/Simulation.h" +#include "test/TestUtils.h" +#include "test/test.h" + +using namespace stellar; + +TEST_CASE("cursors", "[externalqueue]") +{ + VirtualClock clock; + Config const& cfg = getTestConfig(0, Config::TESTDB_POSTGRESQL); + Application::pointer app = createTestApplication(clock, cfg); + + ExternalQueue ps(*app); + std::map curMap; + app->getCommandHandler().manualCmd("setcursor?id=FOO&cursor=123"); + app->getCommandHandler().manualCmd("setcursor?id=BAR&cursor=456"); + + SECTION("get non-existent cursor") + { + ps.getCursorForResource("NONEXISTENT", curMap); + REQUIRE(curMap.size() == 0); + } + + SECTION("get single cursor") + { + ps.getCursorForResource("FOO", curMap); + REQUIRE(curMap.size() == 1); + } + + SECTION("get all cursors") + { + ps.getCursorForResource("", curMap); + REQUIRE(curMap.size() == 2); + } +} +#endif diff --git a/src/simulation/Simulation.cpp b/src/simulation/Simulation.cpp index 356e1dc4c1..e4a86a5308 100644 --- a/src/simulation/Simulation.cpp +++ b/src/simulation/Simulation.cpp @@ -124,23 +124,16 @@ Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2, } Application::pointer app; - if (newDB) + if (mMode == OVER_LOOPBACK) { - if (mMode == OVER_LOOPBACK) - { - app = - createTestApplication( - *clock, *cfg, *this, newDB, false); - } - else - { - app = createTestApplication(*clock, *cfg, newDB, false); - } + app = createTestApplication( + *clock, *cfg, *this, newDB, false); } else { - app = setupApp(*cfg, *clock); + app = createTestApplication(*clock, *cfg, newDB, false); } + mNodes.emplace(nodeKey.getPublicKey(), Node{clock, app}); mPeerMap.emplace(app->getConfig().PEER_PORT, diff --git a/src/simulation/Simulation.h 
b/src/simulation/Simulation.h index e1385f374d..139c67449a 100644 --- a/src/simulation/Simulation.h +++ b/src/simulation/Simulation.h @@ -92,6 +92,18 @@ class Simulation // prevent overlay from automatically re-connecting to peers void stopOverlayTick(); + bool + isSetUpForSorobanUpgrade() const + { + return mSetupForSorobanUpgrade; + } + + void + markReadyForSorobanUpgrade() + { + mSetupForSorobanUpgrade = true; + } + private: void addLoopbackConnection(NodeID initiator, NodeID acceptor); void dropLoopbackConnection(NodeID initiator, NodeID acceptor); @@ -127,6 +139,8 @@ class Simulation // Map PEER_PORT to Application std::unordered_map> mPeerMap; + + bool mSetupForSorobanUpgrade{false}; }; class LoopbackOverlayManager : public OverlayManagerImpl diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp index 672c8acf22..e752ad6a5b 100644 --- a/src/test/TestUtils.cpp +++ b/src/test/TestUtils.cpp @@ -9,6 +9,7 @@ #include "test/TxTests.h" #include "test/test.h" #include "work/WorkScheduler.h" +#include namespace stellar { @@ -195,31 +196,34 @@ upgradeSorobanNetworkConfig(std::function modifyFn, auto& complete = app.getMetrics().NewMeter({"loadgen", "run", "complete"}, "run"); auto completeCount = complete.count(); - // Only create an account if there are none aleady created. - uint32_t offset = 0; - if (app.getMetrics() - .NewMeter({"loadgen", "account", "created"}, "account") - .count() == 0) + + // Use large offset to avoid conflicts with tests using loadgen. + auto const offset = std::numeric_limits::max() - 1; + + // Only create an account if upgrade has not run before.
+ if (!simulation->isSetUpForSorobanUpgrade()) { auto createAccountsLoadConfig = GeneratedLoadConfig::createAccountsLoad(1, 1); - offset = std::numeric_limits::max() - 1; createAccountsLoadConfig.offset = offset; lg.generateLoad(createAccountsLoadConfig); simulation->crankUntil( [&]() { return complete.count() == completeCount + 1; }, 300 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); - } - // Create upload wasm transaction. - auto createUploadCfg = GeneratedLoadConfig::createSorobanUpgradeSetupLoad(); - createUploadCfg.offset = offset; - lg.generateLoad(createUploadCfg); - completeCount = complete.count(); - simulation->crankUntil( - [&]() { return complete.count() == completeCount + 1; }, - 300 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); + // Create upload wasm transaction. + auto createUploadCfg = + GeneratedLoadConfig::createSorobanUpgradeSetupLoad(); + createUploadCfg.offset = offset; + lg.generateLoad(createUploadCfg); + completeCount = complete.count(); + simulation->crankUntil( + [&]() { return complete.count() == completeCount + 1; }, + 300 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); + + simulation->markReadyForSorobanUpgrade(); + } // Create upgrade transaction. auto createUpgradeLoadGenConfig = GeneratedLoadConfig::txLoad( diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h index 96a7703604..d0175a4e0e 100644 --- a/src/test/TestUtils.h +++ b/src/test/TestUtils.h @@ -111,6 +111,11 @@ void setSorobanNetworkConfigForTest(SorobanNetworkConfig& cfg); // for most of the unit tests (unless the test is meant to exercise the // configuration limits). void overrideSorobanNetworkConfigForTest(Application& app); + +// Runs loadgen to arm all nodes in simulation for the given upgrade. If +// applyUpgrade == true, close ledgers until the upgrade has been applied. +// Otherwise just arm the nodes without closing the ledger containing the +// upgrade. 
void upgradeSorobanNetworkConfig(std::function modifyFn, std::shared_ptr simulation, From 240e743288bd2062f64db402b732a02736c8cd61 Mon Sep 17 00:00:00 2001 From: Brett Boston Date: Tue, 26 Nov 2024 16:03:55 -0800 Subject: [PATCH 05/17] Extend nomination monte carlo simulations to analyze timeouts Closes #4561 This change adds a couple more tests to the nomination tests for the following scenarios: 1. Asymmetric quorums 2. Unresponsive validators It also removes some protocol gating in the test suite that's no longer necessary now that P22 is the current protocol in the build. --- src/herder/test/HerderTests.cpp | 688 ++++++++++++++++++++++++++------ 1 file changed, 558 insertions(+), 130 deletions(-) diff --git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp index fcc07519f4..27247b4e6c 100644 --- a/src/herder/test/HerderTests.cpp +++ b/src/herder/test/HerderTests.cpp @@ -5806,53 +5806,48 @@ testWeights(std::vector const& validators) VirtualClock clock; Application::pointer app = createTestApplication(clock, cfg); - for_versions_from( - static_cast( - APPLICATION_SPECIFIC_NOMINATION_LEADER_ELECTION_PROTOCOL_VERSION), - *app, [&]() { - // Collect info about orgs - ValidatorQuality maxQuality; - std::unordered_map orgQualities; - std::unordered_map orgSizes; - std::unordered_map orgQualityCounts; - collectOrgInfo(maxQuality, orgQualities, orgSizes, orgQualityCounts, - validators); - - // Check per-validator weights - HerderImpl& herder = dynamic_cast(app->getHerder()); - std::unordered_map normalizedOrgWeights; - for (ValidatorEntry const& validator : validators) - { - uint64_t weight = herder.getHerderSCPDriver().getNodeWeight( - validator.mKey, cfg.QUORUM_SET, false); - double normalizedWeight = - static_cast(weight) / UINT64_MAX; - normalizedOrgWeights[validator.mHomeDomain] += normalizedWeight; - - std::string const& org = validator.mHomeDomain; - REQUIRE_THAT(normalizedWeight, - Catch::Matchers::WithinAbs( - expectedNormalizedWeight( - 
orgQualityCounts, maxQuality, - orgQualities.at(org), orgSizes.at(org)), - 0.0001)); - } - // Check per-org weights - for (auto const& [org, weight] : normalizedOrgWeights) - { - REQUIRE_THAT(weight, Catch::Matchers::WithinAbs( - expectedOrgNormalizedWeight( - orgQualityCounts, maxQuality, - orgQualities.at(org)), - 0.0001)); - } - }); + // Collect info about orgs + ValidatorQuality maxQuality; + std::unordered_map orgQualities; + std::unordered_map orgSizes; + std::unordered_map orgQualityCounts; + collectOrgInfo(maxQuality, orgQualities, orgSizes, orgQualityCounts, + validators); + + // Check per-validator weights + HerderImpl& herder = dynamic_cast(app->getHerder()); + std::unordered_map normalizedOrgWeights; + for (ValidatorEntry const& validator : validators) + { + uint64_t weight = herder.getHerderSCPDriver().getNodeWeight( + validator.mKey, cfg.QUORUM_SET, false); + double normalizedWeight = static_cast(weight) / UINT64_MAX; + normalizedOrgWeights[validator.mHomeDomain] += normalizedWeight; + + std::string const& org = validator.mHomeDomain; + REQUIRE_THAT(normalizedWeight, + Catch::Matchers::WithinAbs( + expectedNormalizedWeight(orgQualityCounts, maxQuality, + orgQualities.at(org), + orgSizes.at(org)), + 0.0001)); + } + + // Check per-org weights + for (auto const& [org, weight] : normalizedOrgWeights) + { + REQUIRE_THAT( + weight, Catch::Matchers::WithinAbs( + expectedOrgNormalizedWeight( + orgQualityCounts, maxQuality, orgQualities.at(org)), + 0.0001)); + } } // Test that HerderSCPDriver::getNodeWeight produces weights that result in a // fair distribution of nomination wins. 
-TEST_CASE_VERSIONS("getNodeWeight", "[herder]") +TEST_CASE("getNodeWeight", "[herder]") { SECTION("3 tier 1 validators, 1 org") { @@ -5895,9 +5890,10 @@ class TestNominationProtocol : public NominationProtocol } std::set const& - updateRoundLeadersForTesting() + updateRoundLeadersForTesting( + std::optional const& previousValue = std::nullopt) { - mPreviousValue = getRandomValue(); + mPreviousValue = previousValue.value_or(getRandomValue()); updateRoundLeaders(); return getLeaders(); } @@ -5938,102 +5934,95 @@ testWinProbabilities(std::vector const& sks, VirtualClock clock; Application::pointer app = createTestApplication(clock, cfg); - for_versions_from( - static_cast( - APPLICATION_SPECIFIC_NOMINATION_LEADER_ELECTION_PROTOCOL_VERSION), - *app, [&]() { - // Run for `numLedgers` slots, recording the number of times each - // node wins nomination - UnorderedMap publishCounts; - HerderImpl& herder = dynamic_cast(app->getHerder()); - SCP& scp = herder.getSCP(); - int fastTimeouts = 0; - for (int i = 0; i < numLedgers; ++i) - { - auto s = std::make_shared(i, scp); - TestNominationProtocol np(*s); + // Run for `numLedgers` slots, recording the number of times each + // node wins nomination + UnorderedMap publishCounts; + HerderImpl& herder = dynamic_cast(app->getHerder()); + SCP& scp = herder.getSCP(); + int fastTimeouts = 0; + for (int i = 0; i < numLedgers; ++i) + { + auto s = std::make_shared(i, scp); + TestNominationProtocol np(*s); - std::set const& leaders = - np.updateRoundLeadersForTesting(); - REQUIRE(leaders.size() == 1); - for (NodeID const& leader : leaders) - { - ++publishCounts[leader]; - } + std::set const& leaders = np.updateRoundLeadersForTesting(); + REQUIRE(leaders.size() == 1); + for (NodeID const& leader : leaders) + { + ++publishCounts[leader]; + } - if (np.fastTimedOut()) - { - ++fastTimeouts; - } - } + if (np.fastTimedOut()) + { + ++fastTimeouts; + } + } - CLOG_INFO(Herder, "Fast Timeouts: {} ({}%)", fastTimeouts, - fastTimeouts * 100.0 / 
numLedgers); + CLOG_INFO(Herder, "Fast Timeouts: {} ({}%)", fastTimeouts, + fastTimeouts * 100.0 / numLedgers); - // Compute total expected normalized weight across all nodes - double totalNormalizedWeight = 0.0; - for (ValidatorEntry const& validator : validators) - { - totalNormalizedWeight += expectedNormalizedWeight( - orgQualityCounts, maxQuality, - orgQualities.at(validator.mHomeDomain), - orgSizes.at(validator.mHomeDomain)); - } + // Compute total expected normalized weight across all nodes + double totalNormalizedWeight = 0.0; + for (ValidatorEntry const& validator : validators) + { + totalNormalizedWeight += + expectedNormalizedWeight(orgQualityCounts, maxQuality, + orgQualities.at(validator.mHomeDomain), + orgSizes.at(validator.mHomeDomain)); + } - // Check validator win rates - std::map orgPublishCounts; - for (ValidatorEntry const& validator : validators) - { - NodeID const& nodeID = validator.mKey; - int publishCount = publishCounts[nodeID]; - - // Compute and report node's win rate - double winRate = static_cast(publishCount) / numLedgers; - CLOG_INFO(Herder, "Node {} win rate: {} (published {} ledgers)", - cfg.toShortString(nodeID), winRate, publishCount); - - // Expected win rate is `weight / total weight` - double expectedWinRate = - expectedNormalizedWeight( - orgQualityCounts, maxQuality, - orgQualities.at(validator.mHomeDomain), - orgSizes.at(validator.mHomeDomain)) / - totalNormalizedWeight; - - // Check that actual win rate is within .05 of expected win - // rate. 
- REQUIRE_THAT(winRate, - Catch::Matchers::WithinAbs(expectedWinRate, 0.05)); - - // Record org publish counts for the next set of checks - orgPublishCounts[validator.mHomeDomain] += publishCount; - } + // Check validator win rates + std::map orgPublishCounts; + for (ValidatorEntry const& validator : validators) + { + NodeID const& nodeID = validator.mKey; + int publishCount = publishCounts[nodeID]; - // Check org win rates - for (auto const& [org, count] : orgPublishCounts) - { - // Compute and report org's win rate - double winRate = static_cast(count) / numLedgers; - CLOG_INFO(Herder, "Org {} win rate: {} (published {} ledgers)", - org, winRate, count); - - // Expected win rate is `weight / total weight` - double expectedWinRate = - expectedOrgNormalizedWeight(orgQualityCounts, maxQuality, - orgQualities.at(org)) / - totalNormalizedWeight; - - // Check that actual win rate is within .05 of expected win - // rate. - REQUIRE_THAT(winRate, - Catch::Matchers::WithinAbs(expectedWinRate, 0.05)); - } - }); + // Compute and report node's win rate + double winRate = static_cast(publishCount) / numLedgers; + CLOG_INFO(Herder, "Node {} win rate: {} (published {} ledgers)", + cfg.toShortString(nodeID), winRate, publishCount); + + // Expected win rate is `weight / total weight` + double expectedWinRate = + expectedNormalizedWeight(orgQualityCounts, maxQuality, + orgQualities.at(validator.mHomeDomain), + orgSizes.at(validator.mHomeDomain)) / + totalNormalizedWeight; + + // Check that actual win rate is within .05 of expected win + // rate. 
+ REQUIRE_THAT(winRate, + Catch::Matchers::WithinAbs(expectedWinRate, 0.05)); + + // Record org publish counts for the next set of checks + orgPublishCounts[validator.mHomeDomain] += publishCount; + } + + // Check org win rates + for (auto const& [org, count] : orgPublishCounts) + { + // Compute and report org's win rate + double winRate = static_cast(count) / numLedgers; + CLOG_INFO(Herder, "Org {} win rate: {} (published {} ledgers)", org, + winRate, count); + + // Expected win rate is `weight / total weight` + double expectedWinRate = + expectedOrgNormalizedWeight(orgQualityCounts, maxQuality, + orgQualities.at(org)) / + totalNormalizedWeight; + + // Check that actual win rate is within .05 of expected win + // rate. + REQUIRE_THAT(winRate, + Catch::Matchers::WithinAbs(expectedWinRate, 0.05)); + } } // Test that the nomination algorithm produces a fair distribution of ledger // publishers. -TEST_CASE_VERSIONS("Fair nomination win rates", "[herder]") +TEST_CASE("Fair nomination win rates", "[herder]") { SECTION("3 tier 1 validators, 1 org") { @@ -6062,3 +6051,442 @@ TEST_CASE_VERSIONS("Fair nomination win rates", "[herder]") } } } + +// Returns a new `Topology` with the last org in `t` replaced with a new org +// with 3 validators. Requires that the last org in `t` have 3 validators and be +// contiguous at the back of the validators vector.
+static Topology +replaceOneOrg(Topology const& t) +{ + Topology t2(t); // Copy the topology + auto& [sks, validators] = t2; + REQUIRE(sks.size() == validators.size()); + + // Give the org a unique name + std::string const orgName = "org-replaced"; + + // Double check that the new org name is unique + for (ValidatorEntry const& v : validators) + { + REQUIRE(v.mHomeDomain != orgName); + } + + // Remove the last org + constexpr int validatorsPerOrg = 3; + sks.resize(sks.size() - validatorsPerOrg); + validators.resize(validators.size() - validatorsPerOrg); + + // Add new org with 3 validators + int constexpr numValidators = 3; + for (int j = 0; j < numValidators; ++j) + { + SecretKey const& key = sks.emplace_back(SecretKey::random()); + ValidatorEntry& entry = validators.emplace_back(); + entry.mName = fmt::format("validator-replaced-{}", j); + entry.mHomeDomain = orgName; + entry.mQuality = ValidatorQuality::VALIDATOR_HIGH_QUALITY; + entry.mKey = key.getPublicKey(); + entry.mHasHistory = false; + } + + return {sks, validators}; +} + +// Add `orgsToAdd` new orgs to the topology `t`. Each org will have 3 +// validators. +static Topology +addOrgs(int orgsToAdd, Topology const& t) +{ + Topology t2(t); // Copy the topology + auto& [sks, validators] = t2; + REQUIRE(sks.size() == validators.size()); + + // Generate new orgs + for (int i = 0; i < orgsToAdd; ++i) + { + std::string const org = fmt::format("new-org-{}", i); + int constexpr numValidators = 3; + for (int j = 0; j < numValidators; ++j) + { + SecretKey const& key = sks.emplace_back(SecretKey::random()); + ValidatorEntry& entry = validators.emplace_back(); + entry.mName = fmt::format("new-validator-{}-{}", i, j); + entry.mHomeDomain = org; + entry.mQuality = ValidatorQuality::VALIDATOR_HIGH_QUALITY; + entry.mKey = key.getPublicKey(); + entry.mHasHistory = false; + } + } + return t2; +} + +// Returns `true` if the set intersection of `leaders1` and `leaders2` is not +// empty. 
+bool +leadersIntersect(std::set const& leaders1, + std::set const& leaders2) +{ + std::vector intersection; + std::set_intersection(leaders1.begin(), leaders1.end(), leaders2.begin(), + leaders2.end(), std::back_inserter(intersection)); + return !intersection.empty(); +} + +// Given two quorum sets consisting of validators in `validators1` and +// `validators2`, this function returns the probability that the two quorum sets +// will agree on a leader in the first round of nomination. +double +computeExpectedFirstRoundAgreementProbability( + std::vector const& validators1, + std::vector const& validators2) +{ + // Gather orgs + std::set orgs1; + std::transform(validators1.begin(), validators1.end(), + std::inserter(orgs1, orgs1.end()), + [](ValidatorEntry const& v) { return v.mHomeDomain; }); + std::set orgs2; + std::transform(validators2.begin(), validators2.end(), + std::inserter(orgs2, orgs2.end()), + [](ValidatorEntry const& v) { return v.mHomeDomain; }); + + // Compute overlap + std::vector sharedOrgs; + std::set_intersection(orgs1.begin(), orgs1.end(), orgs2.begin(), + orgs2.end(), std::back_inserter(sharedOrgs)); + + // Probability of agreement in first round is (orgs overlapping / orgs1) * + // (orgs overlapping / orgs2). That's the probability that the two sides + // will pick any overlapping org. The algorithm guarantees that if they pick + // overlapping validator, they'll pick the same validator. + double overlap = static_cast(sharedOrgs.size()); + return overlap / orgs1.size() * overlap / orgs2.size(); +} + +// Test that the nomination algorithm behaves as expected when the two quorum +// sets `qs1` and `qs2` are not equivalent. This function requires that both +// quorum sets overlap, and contain only a single quality level of validators. +// Runs simulation for `numLedgers` slots. +// NOTE: This test counts any failure to agree on a leader as a timeout. 
In +// practice, it's possible that one side of the split is large enough to proceed +// without the other side. In this case, the larger side might not experience a +// timeout and "drag" the other side through consensus with it. However, this +// test aims to analyze the worst case scenario where the two sides are fairly +// balanced and real-world networking conditions are in place (some nodes +// lagging, etc), such that disagreement always results in a timeout. +void +testAsymmetricTimeouts(Topology const& qs1, Topology const& qs2, + int const numLedgers) +{ + auto const& [sks1, validators1] = qs1; + auto const& [sks2, validators2] = qs2; + + REQUIRE(sks1.size() == validators1.size()); + REQUIRE(sks2.size() == validators2.size()); + + // Generate configs and nodes representing one validator with each quorum + // set + std::vector clocks(2); + std::vector apps; + for (int i = 0; i < 2; ++i) + { + Config cfg = getTestConfig(i); + cfg.ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING = true; + cfg.generateQuorumSetForTesting(i == 0 ? validators1 : validators2); + cfg.NODE_SEED = i == 0 ? sks1.back() : sks2.back(); + + auto app = apps.emplace_back(createTestApplication(clocks.at(i), cfg)); + } + + // Run the nomination algorithm for `numLedgers` slots. Simulate timeouts by + // re-running slots that don't agree on a leader until their leader + // elections overlap. Record the number of timeouts it takes for the two + // quorum sets to agree on a leader in `timeouts`, which is effectively a + // mapping from number of timeouts to the number of ledgers that experienced + // that many timeouts. 
+ std::vector timeouts(std::max(validators1.size(), validators2.size())); + for (int i = 0; i < numLedgers; ++i) + { + Value const v = getRandomValue(); + SCP& scp1 = dynamic_cast(apps.at(0)->getHerder()).getSCP(); + SCP& scp2 = dynamic_cast(apps.at(1)->getHerder()).getSCP(); + auto s1 = std::make_shared(i, scp1); + auto s2 = std::make_shared(i, scp2); + + TestNominationProtocol np1(*s1); + TestNominationProtocol np2(*s2); + + for (int j = 0; j < timeouts.size(); ++j) + { + std::set const& leaders1 = + np1.updateRoundLeadersForTesting(v); + std::set const& leaders2 = + np2.updateRoundLeadersForTesting(v); + REQUIRE(leaders1.size() == j + 1); + REQUIRE(leaders2.size() == j + 1); + + if (leadersIntersect(leaders1, leaders2)) + { + // Agreed on a leader! Record the number of timeouts resulted. + ++timeouts.at(j); + break; + } + } + + // If leaders don't intersect after running through the loop then the + // two quorum sets have no overlap and the test is broken. + REQUIRE(leadersIntersect(np1.getLeaders(), np2.getLeaders())); + } + + // For the first round, we can easily compute the expected agreement + // probability. For subsequent rounds, we check only that the success rate + // increases over time (modulo some small epsilon). + double expectedSuccessRate = + computeExpectedFirstRoundAgreementProbability(validators1, validators2); + + // Allow for some small decrease in success rate from the theoretical value. + // We're working with probabilistic simulation here so we can't be too + // strict or the test will be flaky. + double constexpr epsilon = 0.1; + + // There's not enough data in the tail of the distribution to allow us to + // assert that the success rate is what's expected. To avoid sporadic test + // failures, we cut off `tailCutoffPoint` of the tail of the distribution + // for the purposes of asserting test values. However, the test will still + // log those success rates for manual examination. 
+ double constexpr tailCutoffPoint = 0.05; + + int numLedgersRemaining = numLedgers; + for (int i = 0; i < timeouts.size(); ++i) + { + int const numTimeouts = timeouts.at(i); + if (numTimeouts == 0) + { + // Avoid cluttering output + continue; + } + + CLOG_INFO(Herder, "Ledgers with {} timeouts: {} ({}%)", i, numTimeouts, + static_cast(numTimeouts) * 100 / numLedgers); + + if (numLedgersRemaining > numLedgers * tailCutoffPoint) + { + // Check that success rate increases over time. Allow some epsilon + // decrease because this is a probabilistic simulation. Also stop + // checking when we're at the last `tailCutoffPoint` timeouts as the + // data is too sparse to be useful. + double successRate = + static_cast(timeouts.at(i)) / numLedgersRemaining; + REQUIRE(successRate > expectedSuccessRate - epsilon); + + // Take max of success rate and previous success rate to avoid + // accidentally accepting a declining success rate due to epsilon. + expectedSuccessRate = std::max(successRate, expectedSuccessRate); + numLedgersRemaining -= numTimeouts; + } + } +} + +// Test timeouts with asymmetric quorums. This test serves two purposes: +// 1. It contains assertions checking for moderate (10%) deviations from the +// expected behavior of the nomination algorithm. These should detect any +// major issues/regressions with the algorithm. +// 2. It logs the distributions of timeouts for manual inspection. This is +// useful for understanding the behavior of the algorithm and for testing +// specific scenarios one might be interested in (e.g., if tier 1 disagrees +// on one org's presence in tier 1, what is the impact on nomination +// timeouts?). +// NOTE: This provides a worst-case analysis of timeouts. See the NOTE on +// `testAsymmetricTimeouts` for more details.
+TEST_CASE("Asymmetric quorum timeouts", "[herder]") +{ + // Number of slots to run for + int constexpr numLedgers = 20000; + + SECTION("Tier 1-like topology with replaced org") + { + auto t = teir1Like(); + testAsymmetricTimeouts(t, replaceOneOrg(t), numLedgers); + } + + SECTION("Tier 1-like topology with 1 added org") + { + auto t = teir1Like(); + testAsymmetricTimeouts(t, addOrgs(1, t), numLedgers); + } + + SECTION("Tier 1-like topology with 3 added orgs") + { + auto t = teir1Like(); + testAsymmetricTimeouts(t, addOrgs(3, t), numLedgers); + } +} + +// Test that the nomination algorithm behaves as expected when a random +// `numUnresponsive` set of nodes in `qs` are unresponsive. Runs simulation for +// `numLedgers` slots. +void +testUnresponsiveTimeouts(Topology const& qs, int numUnresponsive, + int const numLedgers) +{ + auto const& [sks, validators] = qs; + REQUIRE(sks.size() == validators.size()); + REQUIRE(numUnresponsive < validators.size()); + + // extract and shuffle node ids. Choose `numUnresponsive` nodes to be the + // unresponsive nodes. 
+ std::vector nodeIDs; + std::transform(validators.begin(), validators.end(), + std::back_inserter(nodeIDs), + [](ValidatorEntry const& v) { return v.mKey; }); + stellar::shuffle(nodeIDs.begin(), nodeIDs.end(), gRandomEngine); + std::set unresponsive(nodeIDs.begin(), + nodeIDs.begin() + numUnresponsive); + + // Collect info about orgs + ValidatorQuality maxQuality; + std::unordered_map orgQualities; + std::unordered_map orgSizes; + std::unordered_map orgQualityCounts; + collectOrgInfo(maxQuality, orgQualities, orgSizes, orgQualityCounts, + validators); + + // Compute total weight of all validators, as well as the total weight of + // unresponsive validators + double totalWeight = 0.0; + double unresponsiveWeight = 0.0; + for (ValidatorEntry const& validator : validators) + { + double normalizedWeight = + expectedNormalizedWeight(orgQualityCounts, maxQuality, + orgQualities.at(validator.mHomeDomain), + orgSizes.at(validator.mHomeDomain)); + totalWeight += normalizedWeight; + if (unresponsive.count(validator.mKey)) + { + unresponsiveWeight += normalizedWeight; + } + } + + // Compute the average weight of an unresponsive node + double avgUnresponsiveWeight = unresponsiveWeight / numUnresponsive; + + // Compute expected number of ledgers experiencing `n` timeouts where `n` is + // the index of the `timeouts` vector. This vector is a mapping from number + // of timeouts to expected number of ledgers experiencing that number of + // timeouts. + std::vector expectedTimeouts(numUnresponsive + 1); + double remainingWeight = totalWeight; + int remainingUnresponsive = numUnresponsive; + int remainingLedgers = numLedgers; + for (int i = 0; i < expectedTimeouts.size(); ++i) + { + double timeoutProb = + (avgUnresponsiveWeight * remainingUnresponsive) / remainingWeight; + // To get expected number of ledgers experiencing `i` timeouts, we take + // the probability a timeout does not occur and multiply it by the + // number of remaining ledgers. 
+ int expectedLedgers = (1 - timeoutProb) * remainingLedgers; + expectedTimeouts.at(i) = expectedLedgers; + + // Remaining ledgers decreases by expected number of ledgers + // experiencing `i` timeouts + remainingLedgers -= expectedLedgers; + + // For `i+1` timeouts to occur, an unresponsive node must be chosen. + // Therefore, deduct the average weight of an unresponsive node from the + // total weight left in the network. + remainingWeight -= avgUnresponsiveWeight; + --remainingUnresponsive; + } + + // Generate a config + Config cfg = getTestConfig(); + cfg.ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING = true; + cfg.generateQuorumSetForTesting(validators); + cfg.NODE_SEED = sks.front(); + + // Create an application + VirtualClock clock; + Application::pointer app = createTestApplication(clock, cfg); + + // Run for `numLedgers` slots, recording the number of times each slot timed + // out due to unresponsive nodes before successfully electing a responsive + // leader. + SCP& scp = dynamic_cast(app->getHerder()).getSCP(); + std::vector timeouts(numUnresponsive + 1); + for (int i = 0; i < numLedgers; ++i) + { + Value const v = getRandomValue(); + auto s = std::make_shared(i, scp); + + TestNominationProtocol np(*s); + for (int i = 0; i < timeouts.size(); ++i) + { + std::set const& leaders = + np.updateRoundLeadersForTesting(v); + // If leaders is a subset of unresponsive, then a timeout occurs. + if (!std::includes(unresponsive.begin(), unresponsive.end(), + leaders.begin(), leaders.end())) + { + ++timeouts.at(i); + break; + } + } + } + + // Allow for some small multiplicative increase in timeouts from the + // theoretical value. We're working with probabilistic simulation here so + // we can't be too strict or the test will be flaky. + double constexpr epsilon = 1.1; + + // There's not enough data in the tail of the distribution to allow us to + // assert that the timeout values are what's expected. 
To avoid sporadic + // test failures, we cut off `tailCutoffPoint` of the tail of the + // distribution for the purposes of asserting test values. However, the test + // will still log those values for manual examination. + double constexpr tailCutoffPoint = 0.05; + + // Analyze timeouts + int numLedgersRemaining = numLedgers; + for (int i = 0; i < timeouts.size(); ++i) + { + int const numTimeouts = timeouts.at(i); + int const expectedNumTimeouts = expectedTimeouts.at(i); + + if (numLedgersRemaining > numLedgers * tailCutoffPoint) + { + // Check that timeouts are less than epsilon times the expected + // value. Also stop checking when we're at the last + // `tailCutoffPoint` timeouts as the data is too sparse to be + // useful. + REQUIRE(numTimeouts < expectedNumTimeouts * epsilon); + } + CLOG_INFO(Herder, "Ledgers with {} timeouts: {} ({}%)", i, numTimeouts, + numTimeouts * 100.0 / numLedgers); + numLedgersRemaining -= numTimeouts; + } +} + +// Test timeouts for a tier 1-like topology with 1-5 unresponsive nodes. This +// test serves two purposes: +// 1. It contains assertions checking for moderate (10%) deviations from the +// expected behavior of the nomination algorithm. These should detect any +// major issues/regressions with the algorithm. +// 2. It logs the distributions of timeouts for manual inspection. This is +// useful for understanding the behavior of the algorithm and for testing +// specific scenarios one might be interested in (e.g., if 3 tier 1 nodes +// are heavily lagging, what is the impact on nomination timeouts?). 
+TEST_CASE("Unresponsive quorum timeouts", "[herder]") +{ + // Number of slots to run for + int constexpr numLedgers = 20000; + + auto t = teir1Like(); + for (int i = 1; i <= 5; ++i) + { + CLOG_INFO(Herder, "Simulating nomination with {} unresponsive nodes", + i); + testUnresponsiveTimeouts(t, i, numLedgers); + } +} \ No newline at end of file From afca343d68ad950d28cd9e64c71d2cdefe935de7 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Mon, 7 Oct 2024 14:50:05 -0700 Subject: [PATCH 06/17] Make background eviction mandatory --- docs/stellar-core_example.cfg | 5 - src/bucket/BucketManager.cpp | 10 - src/bucket/BucketManager.h | 4 +- src/bucket/LiveBucket.cpp | 91 --- src/bucket/LiveBucket.h | 13 - src/bucket/LiveBucketList.cpp | 53 -- src/bucket/LiveBucketList.h | 4 - src/bucket/test/BucketListTests.cpp | 751 +++++++++--------- src/bucket/test/BucketManagerTests.cpp | 10 +- src/bucket/test/BucketTestUtils.cpp | 15 +- src/ledger/LedgerManagerImpl.cpp | 16 +- src/main/ApplicationImpl.cpp | 45 +- src/main/Config.cpp | 26 +- src/main/Config.h | 4 - src/test/FuzzerImpl.cpp | 1 - src/test/test.cpp | 2 - .../test/InvokeHostFunctionTests.cpp | 196 ++--- 17 files changed, 469 insertions(+), 777 deletions(-) diff --git a/docs/stellar-core_example.cfg b/docs/stellar-core_example.cfg index c8325b7476..3669b85acd 100644 --- a/docs/stellar-core_example.cfg +++ b/docs/stellar-core_example.cfg @@ -258,11 +258,6 @@ BUCKETLIST_DB_INDEX_CUTOFF = 20 # this value is ingnored and indexes are never persisted. BUCKETLIST_DB_PERSIST_INDEX = true -# BACKGROUND_EVICTION_SCAN (bool) default true -# Determines whether eviction scans occur in the background thread. Requires -# that DEPRECATED_SQL_LEDGER_STATE is set to false. -BACKGROUND_EVICTION_SCAN = true - # EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING (bool) default false # Determines whether some of overlay processing occurs in the background # thread. 
diff --git a/src/bucket/BucketManager.cpp b/src/bucket/BucketManager.cpp index 57ca4cdfcf..b68600ac80 100644 --- a/src/bucket/BucketManager.cpp +++ b/src/bucket/BucketManager.cpp @@ -1056,16 +1056,6 @@ BucketManager::maybeSetIndex(std::shared_ptr b, } } -void -BucketManager::scanForEvictionLegacy(AbstractLedgerTxn& ltx, uint32_t ledgerSeq) -{ - ZoneScoped; - releaseAssert(protocolVersionStartsFrom(ltx.getHeader().ledgerVersion, - SOROBAN_PROTOCOL_VERSION)); - mLiveBucketList->scanForEvictionLegacy( - mApp, ltx, ledgerSeq, mBucketListEvictionCounters, mEvictionStatistics); -} - void BucketManager::startBackgroundEvictionScan(uint32_t ledgerSeq) { diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index 09f4e1818f..a1e3ffbe87 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -290,10 +290,8 @@ class BucketManager : NonMovableOrCopyable std::unique_ptr&& index); // Scans BucketList for non-live entries to evict starting at the entry - // pointed to by EvictionIterator. Scans until `maxEntriesToEvict` entries + // pointed to by EvictionIterator. Evicts until `maxEntriesToEvict` entries // have been evicted or maxEvictionScanSize bytes have been scanned. 
- void scanForEvictionLegacy(AbstractLedgerTxn& ltx, uint32_t ledgerSeq); - void startBackgroundEvictionScan(uint32_t ledgerSeq); void resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, uint32_t ledgerSeq, diff --git a/src/bucket/LiveBucket.cpp b/src/bucket/LiveBucket.cpp index f640422fcd..b5b5257cd8 100644 --- a/src/bucket/LiveBucket.cpp +++ b/src/bucket/LiveBucket.cpp @@ -419,97 +419,6 @@ LiveBucket::checkProtocolLegality(BucketEntry const& entry, } } -Loop -LiveBucket::scanForEvictionLegacy( - AbstractLedgerTxn& ltx, EvictionIterator& iter, uint32_t& bytesToScan, - uint32_t& remainingEntriesToEvict, uint32_t ledgerSeq, - medida::Counter& entriesEvictedCounter, - medida::Counter& bytesScannedForEvictionCounter, - std::shared_ptr stats) const -{ - ZoneScoped; - releaseAssert(stats); - - if (isEmpty() || - protocolVersionIsBefore(getBucketVersion(), SOROBAN_PROTOCOL_VERSION)) - { - // EOF, need to continue reading next bucket - return Loop::INCOMPLETE; - } - - if (remainingEntriesToEvict == 0 || bytesToScan == 0) - { - // Reached end of scan region - return Loop::COMPLETE; - } - - XDRInputFileStream stream{}; - stream.open(mFilename.string()); - stream.seek(iter.bucketFileOffset); - - BucketEntry be; - while (stream.readOne(be)) - { - if (be.type() == INITENTRY || be.type() == LIVEENTRY) - { - auto const& le = be.liveEntry(); - if (isTemporaryEntry(le.data)) - { - ZoneNamedN(maybeEvict, "maybe evict entry", true); - - auto ttlKey = getTTLKey(le); - uint32_t liveUntilLedger = 0; - auto shouldEvict = [&] { - auto ttlLtxe = ltx.loadWithoutRecord(ttlKey); - if (!ttlLtxe) - { - // Entry was already deleted either manually or by an - // earlier eviction scan, do nothing - return false; - } - - releaseAssert(ttlLtxe); - liveUntilLedger = - ttlLtxe.current().data.ttl().liveUntilLedgerSeq; - return !isLive(ttlLtxe.current(), ledgerSeq); - }; - - if (shouldEvict()) - { - ZoneNamedN(evict, "evict entry", true); - auto age = ledgerSeq - liveUntilLedger; - 
stats->recordEvictedEntry(age); - - ltx.erase(ttlKey); - ltx.erase(LedgerEntryKey(le)); - entriesEvictedCounter.inc(); - --remainingEntriesToEvict; - } - } - } - - auto newPos = stream.pos(); - auto bytesRead = newPos - iter.bucketFileOffset; - iter.bucketFileOffset = newPos; - bytesScannedForEvictionCounter.inc(bytesRead); - if (bytesRead >= bytesToScan) - { - // Reached end of scan region - bytesToScan = 0; - return Loop::COMPLETE; - } - else if (remainingEntriesToEvict == 0) - { - return Loop::COMPLETE; - } - - bytesToScan -= bytesRead; - } - - // Hit eof - return Loop::INCOMPLETE; -} - LiveBucket::LiveBucket(std::string const& filename, Hash const& hash, std::unique_ptr&& index) : BucketBase(filename, hash, std::move(index)) diff --git a/src/bucket/LiveBucket.h b/src/bucket/LiveBucket.h index a34a3d8393..cc3fe34c0b 100644 --- a/src/bucket/LiveBucket.h +++ b/src/bucket/LiveBucket.h @@ -97,19 +97,6 @@ class LiveBucket : public BucketBase, void apply(Application& app) const; #endif - // Returns Loop::INCOMPLETE if eof reached, Loop::COMPLETE otherwise. - // Modifies iter as the bucket is scanned. Also modifies bytesToScan and - // maxEntriesToEvict such that after this function returns: - // bytesToScan -= amount_bytes_scanned - // maxEntriesToEvict -= entries_evicted - Loop scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter, - uint32_t& bytesToScan, - uint32_t& remainingEntriesToEvict, - uint32_t ledgerSeq, - medida::Counter& entriesEvictedCounter, - medida::Counter& bytesScannedForEvictionCounter, - std::shared_ptr stats) const; - // Create a fresh bucket from given vectors of init (created) and live // (updated) LedgerEntries, and dead LedgerEntryKeys. The bucket will // be sorted, hashed, and adopted in the provided BucketManager. 
diff --git a/src/bucket/LiveBucketList.cpp b/src/bucket/LiveBucketList.cpp index 240f0d5876..a2cbb456d3 100644 --- a/src/bucket/LiveBucketList.cpp +++ b/src/bucket/LiveBucketList.cpp @@ -145,57 +145,4 @@ LiveBucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, counters.incompleteBucketScan.inc(); } } - -// To avoid noisy data, only count metrics that encompass a complete -// eviction cycle. If a node joins the network mid cycle, metrics will be -// nullopt and be initialized at the start of the next cycle. -void -LiveBucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, - uint32_t ledgerSeq, - EvictionCounters& counters, - std::shared_ptr stats) -{ - releaseAssert(stats); - - auto getBucketFromIter = [&levels = mLevels](EvictionIterator const& iter) { - auto& level = levels.at(iter.bucketListLevel); - return iter.isCurrBucket ? level.getCurr() : level.getSnap(); - }; - - auto const& networkConfig = - app.getLedgerManager().getSorobanNetworkConfig(); - auto const firstScanLevel = - networkConfig.stateArchivalSettings().startingEvictionScanLevel; - auto evictionIter = networkConfig.evictionIterator(); - auto scanSize = networkConfig.stateArchivalSettings().evictionScanSize; - auto maxEntriesToEvict = - networkConfig.stateArchivalSettings().maxEntriesToArchive; - - updateStartingEvictionIterator(evictionIter, firstScanLevel, ledgerSeq); - - auto startIter = evictionIter; - auto b = getBucketFromIter(evictionIter); - - while (b->scanForEvictionLegacy( - ltx, evictionIter, scanSize, maxEntriesToEvict, ledgerSeq, - counters.entriesEvicted, counters.bytesScannedForEviction, - stats) == Loop::INCOMPLETE) - { - - if (updateEvictionIterAndRecordStats(evictionIter, startIter, - firstScanLevel, ledgerSeq, stats, - counters)) - { - break; - } - - b = getBucketFromIter(evictionIter); - checkIfEvictionScanIsStuck( - evictionIter, - networkConfig.stateArchivalSettings().evictionScanSize, b, - counters); - } - - 
networkConfig.updateEvictionIterator(ltx, evictionIter); -} } diff --git a/src/bucket/LiveBucketList.h b/src/bucket/LiveBucketList.h index 683337e096..0f2a6ac268 100644 --- a/src/bucket/LiveBucketList.h +++ b/src/bucket/LiveBucketList.h @@ -36,10 +36,6 @@ class LiveBucketList : public BucketListBase std::shared_ptr b, EvictionCounters& counters); - void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, - uint32_t ledgerSeq, EvictionCounters& counters, - std::shared_ptr stats); - // Add a batch of initial (created), live (updated) and dead entries to the // bucketlist, representing the entries effected by closing // `currLedger`. The bucketlist will incorporate these into the smallest diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index 7cc5a6a64b..65c84b50ed 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -953,342 +953,371 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") Config cfg(getTestConfig()); cfg.USE_CONFIG_FOR_GENESIS = true; - auto test = [&](bool backgroundScan) { - // BucketTestApplication writes directly to BL and circumvents LedgerTxn - // interface, so we have to use BucketListDB for lookups - cfg.DEPRECATED_SQL_LEDGER_STATE = false; - cfg.BACKGROUND_EVICTION_SCAN = backgroundScan; - - auto app = createTestApplication(clock, cfg); - for_versions_from(20, *app, [&] { - LedgerManagerForBucketTests& lm = app->getLedgerManager(); - auto& bm = app->getBucketManager(); - auto& bl = bm.getLiveBucketList(); - - auto& networkCfg = [&]() -> SorobanNetworkConfig& { - LedgerTxn ltx(app->getLedgerTxnRoot()); - return app->getLedgerManager().getMutableSorobanNetworkConfig(); - }(); - - auto& stateArchivalSettings = networkCfg.stateArchivalSettings(); - auto& evictionIter = networkCfg.evictionIterator(); - auto const levelToScan = 3; - uint32_t ledgerSeq = 1; - - stateArchivalSettings.minTemporaryTTL = 1; - stateArchivalSettings.minPersistentTTL = 1; - - // 
Because this test uses BucketTestApplication, we must manually - // add the Network Config LedgerEntries to the BucketList with - // setNextLedgerEntryBatchForBucketTesting whenever state archival - // settings or the eviction iterator is manually changed - auto getNetworkCfgLE = [&] { - std::vector result; - LedgerEntry sesLE; - sesLE.data.type(CONFIG_SETTING); - sesLE.data.configSetting().configSettingID( - ConfigSettingID::CONFIG_SETTING_STATE_ARCHIVAL); - sesLE.data.configSetting().stateArchivalSettings() = - stateArchivalSettings; - result.emplace_back(sesLE); - - LedgerEntry iterLE; - iterLE.data.type(CONFIG_SETTING); - iterLE.data.configSetting().configSettingID( - ConfigSettingID::CONFIG_SETTING_EVICTION_ITERATOR); - iterLE.data.configSetting().evictionIterator() = evictionIter; - result.emplace_back(iterLE); - - return result; - }; + // BucketTestApplication writes directly to BL and circumvents LedgerTxn + // interface, so we have to use BucketListDB for lookups + cfg.DEPRECATED_SQL_LEDGER_STATE = false; - auto updateNetworkCfg = [&] { - lm.setNextLedgerEntryBatchForBucketTesting( - {}, getNetworkCfgLE(), {}); - closeLedger(*app); - ++ledgerSeq; - }; + auto app = createTestApplication(clock, cfg); + for_versions_from(20, *app, [&] { + LedgerManagerForBucketTests& lm = app->getLedgerManager(); + auto& bm = app->getBucketManager(); + auto& bl = bm.getLiveBucketList(); - auto checkIfEntryExists = [&](std::set const& keys, - bool shouldExist) { - LedgerTxn ltx(app->getLedgerTxnRoot()); - for (auto const& key : keys) - { - auto txle = ltx.loadWithoutRecord(key); - REQUIRE(static_cast(txle) == shouldExist); + auto& networkCfg = [&]() -> SorobanNetworkConfig& { + LedgerTxn ltx(app->getLedgerTxnRoot()); + return app->getLedgerManager().getMutableSorobanNetworkConfig(); + }(); + + auto& stateArchivalSettings = networkCfg.stateArchivalSettings(); + auto& evictionIter = networkCfg.evictionIterator(); + auto const levelToScan = 3; + uint32_t ledgerSeq = 1; + + 
stateArchivalSettings.minTemporaryTTL = 1; + stateArchivalSettings.minPersistentTTL = 1; + + // Because this test uses BucketTestApplication, we must manually + // add the Network Config LedgerEntries to the BucketList with + // setNextLedgerEntryBatchForBucketTesting whenever state archival + // settings or the eviction iterator is manually changed + auto getNetworkCfgLE = [&] { + std::vector result; + LedgerEntry sesLE; + sesLE.data.type(CONFIG_SETTING); + sesLE.data.configSetting().configSettingID( + ConfigSettingID::CONFIG_SETTING_STATE_ARCHIVAL); + sesLE.data.configSetting().stateArchivalSettings() = + stateArchivalSettings; + result.emplace_back(sesLE); + + LedgerEntry iterLE; + iterLE.data.type(CONFIG_SETTING); + iterLE.data.configSetting().configSettingID( + ConfigSettingID::CONFIG_SETTING_EVICTION_ITERATOR); + iterLE.data.configSetting().evictionIterator() = evictionIter; + result.emplace_back(iterLE); + + return result; + }; - auto TTLTxle = ltx.loadWithoutRecord(getTTLKey(key)); - REQUIRE(static_cast(TTLTxle) == shouldExist); - } - }; + auto updateNetworkCfg = [&] { + lm.setNextLedgerEntryBatchForBucketTesting({}, getNetworkCfgLE(), + {}); + closeLedger(*app); + ++ledgerSeq; + }; - std::set tempEntries; - std::set persistentEntries; - std::vector entries; - for (auto& e : - LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( - {CONTRACT_DATA}, 50)) + auto checkIfEntryExists = [&](std::set const& keys, + bool shouldExist) { + LedgerTxn ltx(app->getLedgerTxnRoot()); + for (auto const& key : keys) { - // Set half of the entries to be persistent, half temporary - if (tempEntries.empty() || rand_flip()) - { - e.data.contractData().durability = TEMPORARY; - tempEntries.emplace(LedgerEntryKey(e)); - } - else - { - e.data.contractData().durability = PERSISTENT; - persistentEntries.emplace(LedgerEntryKey(e)); - } + auto txle = ltx.loadWithoutRecord(key); + REQUIRE(static_cast(txle) == shouldExist); - LedgerEntry TTLEntry; - TTLEntry.data.type(TTL); - 
TTLEntry.data.ttl().keyHash = getTTLKey(e).ttl().keyHash; - TTLEntry.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1; + auto TTLTxle = ltx.loadWithoutRecord(getTTLKey(key)); + REQUIRE(static_cast(TTLTxle) == shouldExist); + } + }; - entries.emplace_back(e); - entries.emplace_back(TTLEntry); + std::set tempEntries; + std::set persistentEntries; + std::vector entries; + for (auto& e : + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_DATA}, 50)) + { + // Set half of the entries to be persistent, half temporary + if (tempEntries.empty() || rand_flip()) + { + e.data.contractData().durability = TEMPORARY; + tempEntries.emplace(LedgerEntryKey(e)); + } + else + { + e.data.contractData().durability = PERSISTENT; + persistentEntries.emplace(LedgerEntryKey(e)); } - lm.setNextLedgerEntryBatchForBucketTesting(entries, - getNetworkCfgLE(), {}); + LedgerEntry TTLEntry; + TTLEntry.data.type(TTL); + TTLEntry.data.ttl().keyHash = getTTLKey(e).ttl().keyHash; + TTLEntry.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1; + + entries.emplace_back(e); + entries.emplace_back(TTLEntry); + } + + lm.setNextLedgerEntryBatchForBucketTesting(entries, getNetworkCfgLE(), + {}); + closeLedger(*app); + ++ledgerSeq; + + // Iterate until entries reach the level where eviction will start + for (; bl.getLevel(levelToScan).getCurr()->isEmpty(); ++ledgerSeq) + { + checkIfEntryExists(tempEntries, true); + checkIfEntryExists(persistentEntries, true); + lm.setNextLedgerEntryBatchForBucketTesting({}, {}, {}); closeLedger(*app); - ++ledgerSeq; + } - // Iterate until entries reach the level where eviction will start - for (; bl.getLevel(levelToScan).getCurr()->isEmpty(); ++ledgerSeq) + SECTION("basic eviction test") + { + // Set eviction to start at level where the entries + // currently are + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); + + // All entries should be evicted at once + closeLedger(*app); + ++ledgerSeq; + checkIfEntryExists(tempEntries, 
false); + checkIfEntryExists(persistentEntries, true); + + auto& entriesEvictedCounter = bm.getEntriesEvictedCounter(); + REQUIRE(entriesEvictedCounter.count() == tempEntries.size()); + + // Close ledgers until evicted DEADENTRYs merge with + // original INITENTRYs. This checks that BucketList + // invariants are respected + for (auto initialDeadMerges = + bm.readMergeCounters().mOldInitEntriesMergedWithNewDead; + bm.readMergeCounters().mOldInitEntriesMergedWithNewDead < + initialDeadMerges + tempEntries.size(); + ++ledgerSeq) { - checkIfEntryExists(tempEntries, true); - checkIfEntryExists(persistentEntries, true); - lm.setNextLedgerEntryBatchForBucketTesting({}, {}, {}); closeLedger(*app); } - SECTION("basic eviction test") - { - // Set eviction to start at level where the entries - // currently are - stateArchivalSettings.startingEvictionScanLevel = levelToScan; - updateNetworkCfg(); + REQUIRE(entriesEvictedCounter.count() == tempEntries.size()); + } - // All entries should be evicted at once - closeLedger(*app); - ++ledgerSeq; - checkIfEntryExists(tempEntries, false); - checkIfEntryExists(persistentEntries, true); - - auto& entriesEvictedCounter = bm.getEntriesEvictedCounter(); - REQUIRE(entriesEvictedCounter.count() == tempEntries.size()); - - // Close ledgers until evicted DEADENTRYs merge with - // original INITENTRYs. 
This checks that BucketList - // invariants are respected - for (auto initialDeadMerges = - bm.readMergeCounters() - .mOldInitEntriesMergedWithNewDead; - bm.readMergeCounters().mOldInitEntriesMergedWithNewDead < - initialDeadMerges + tempEntries.size(); - ++ledgerSeq) + SECTION("shadowed entries not evicted") + { + // Set eviction to start at level where the entries + // currently are + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); + + // Shadow non-live entries with updated, live versions + for (auto& e : entries) + { + // Only need to update TTLEntries + if (e.data.type() == TTL) { - closeLedger(*app); + e.data.ttl().liveUntilLedgerSeq = ledgerSeq + 10; } - - REQUIRE(entriesEvictedCounter.count() == tempEntries.size()); } + lm.setNextLedgerEntryBatchForBucketTesting({}, entries, {}); - SECTION("shadowed entries not evicted") - { - // Set eviction to start at level where the entries - // currently are - stateArchivalSettings.startingEvictionScanLevel = levelToScan; - updateNetworkCfg(); + // Close two ledgers to give eviction scan opportunity to + // process new entries + closeLedger(*app); + closeLedger(*app); - // Shadow non-live entries with updated, live versions - for (auto& e : entries) - { - // Only need to update TTLEntries - if (e.data.type() == TTL) - { - e.data.ttl().liveUntilLedgerSeq = ledgerSeq + 10; - } - } - lm.setNextLedgerEntryBatchForBucketTesting({}, entries, {}); + // Entries are shadowed, should not be evicted + checkIfEntryExists(tempEntries, true); + checkIfEntryExists(persistentEntries, true); + } - // Close two ledgers to give eviction scan opportunity to - // process new entries - closeLedger(*app); + SECTION("maxEntriesToArchive") + { + // Check that we only evict one entry at a time + stateArchivalSettings.maxEntriesToArchive = 1; + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); + + auto& entriesEvictedCounter = bm.getEntriesEvictedCounter(); + auto prevIter = 
evictionIter; + for (auto prevCount = entriesEvictedCounter.count(); + prevCount < tempEntries.size();) + { closeLedger(*app); - // Entries are shadowed, should not be evicted - checkIfEntryExists(tempEntries, true); - checkIfEntryExists(persistentEntries, true); + // Make sure we evict all entries without circling back + // through the BucketList + auto didAdvance = + prevIter.bucketFileOffset < evictionIter.bucketFileOffset || + prevIter.bucketListLevel < evictionIter.bucketListLevel || + // assert isCurrBucket goes from true -> false + // true > false == 1 > 0 + prevIter.isCurrBucket > evictionIter.isCurrBucket; + REQUIRE(didAdvance); + + // Check that we only evict at most maxEntriesToArchive + // per ledger + auto newCount = entriesEvictedCounter.count(); + REQUIRE((newCount == prevCount || newCount == prevCount + 1)); + prevCount = newCount; } - SECTION("maxEntriesToArchive") - { - // Check that we only evict one entry at a time - stateArchivalSettings.maxEntriesToArchive = 1; - stateArchivalSettings.startingEvictionScanLevel = levelToScan; - updateNetworkCfg(); + // All entries should have been evicted + checkIfEntryExists(tempEntries, false); + checkIfEntryExists(persistentEntries, true); + } - auto& entriesEvictedCounter = bm.getEntriesEvictedCounter(); - auto prevIter = evictionIter; - for (auto prevCount = entriesEvictedCounter.count(); - prevCount < tempEntries.size();) - { - closeLedger(*app); + SECTION("maxEntriesToArchive with entry modified on eviction ledger") + { - // Make sure we evict all entries without circling back - // through the BucketList - auto didAdvance = - prevIter.bucketFileOffset < - evictionIter.bucketFileOffset || - prevIter.bucketListLevel < - evictionIter.bucketListLevel || - // assert isCurrBucket goes from true -> false - // true > false == 1 > 0 - prevIter.isCurrBucket > evictionIter.isCurrBucket; - REQUIRE(didAdvance); - - // Check that we only evict at most maxEntriesToArchive - // per ledger - auto newCount = 
entriesEvictedCounter.count(); - REQUIRE( - (newCount == prevCount || newCount == prevCount + 1)); - prevCount = newCount; - } + // This test is for an edge case in background eviction. + // We want to test that if entry n should be the last entry + // evicted due to maxEntriesToArchive, but that entry is + // updated on the eviction ledger, background eviction + // should still evict entry n + 1 + stateArchivalSettings.maxEntriesToArchive = 1; + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); - // All entries should have been evicted - checkIfEntryExists(tempEntries, false); - checkIfEntryExists(persistentEntries, true); - } + // First temp entry in Bucket will be updated with live TTL + std::optional entryToUpdate{}; + + // Second temp entry in bucket should be evicted + LedgerKey entryToEvict; + std::optional expectedEndIterPosition{}; - SECTION( - "maxEntriesToArchive with entry modified on eviction ledger") + for (LiveBucketInputIterator in(bl.getLevel(levelToScan).getCurr()); + in; ++in) { - if (backgroundScan) + // Temp entries should be sorted before persistent in + // the Bucket + auto be = *in; + if (be.type() == INITENTRY || be.type() == LIVEENTRY) { - // This test is for an edge case in background eviction. 
- // We want to test that if entry n should be the last entry - // evicted due to maxEntriesToArchive, but that entry is - // updated on the eviction ledger, background eviction - // should still evict entry n + 1 - stateArchivalSettings.maxEntriesToArchive = 1; - stateArchivalSettings.startingEvictionScanLevel = - levelToScan; - updateNetworkCfg(); - - // First temp entry in Bucket will be updated with live TTL - std::optional entryToUpdate{}; - - // Second temp entry in bucket should be evicted - LedgerKey entryToEvict; - std::optional expectedEndIterPosition{}; - - for (LiveBucketInputIterator in( - bl.getLevel(levelToScan).getCurr()); - in; ++in) + auto le = be.liveEntry(); + if (le.data.type() == CONTRACT_DATA && + le.data.contractData().durability == TEMPORARY) { - // Temp entries should be sorted before persistent in - // the Bucket - auto be = *in; - if (be.type() == INITENTRY || be.type() == LIVEENTRY) + if (!entryToUpdate) { - auto le = be.liveEntry(); - if (le.data.type() == CONTRACT_DATA && - le.data.contractData().durability == TEMPORARY) - { - if (!entryToUpdate) - { - entryToUpdate = LedgerEntryKey(le); - } - else - { - entryToEvict = LedgerEntryKey(le); - expectedEndIterPosition = in.pos(); - break; - } - } + entryToUpdate = LedgerEntryKey(le); + } + else + { + entryToEvict = LedgerEntryKey(le); + expectedEndIterPosition = in.pos(); + break; } } + } + } - REQUIRE(expectedEndIterPosition.has_value()); + REQUIRE(expectedEndIterPosition.has_value()); - // Update first evictable entry with new TTL - auto ttlKey = getTTLKey(*entryToUpdate); - LedgerEntry ttlLe; - ttlLe.data.type(TTL); - ttlLe.data.ttl().keyHash = ttlKey.ttl().keyHash; - ttlLe.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1; + // Update first evictable entry with new TTL + auto ttlKey = getTTLKey(*entryToUpdate); + LedgerEntry ttlLe; + ttlLe.data.type(TTL); + ttlLe.data.ttl().keyHash = ttlKey.ttl().keyHash; + ttlLe.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1; - 
lm.setNextLedgerEntryBatchForBucketTesting({}, {ttlLe}, {}); - closeLedger(*app); + lm.setNextLedgerEntryBatchForBucketTesting({}, {ttlLe}, {}); + closeLedger(*app); - LedgerTxn ltx(app->getLedgerTxnRoot()); - auto firstEntry = ltx.loadWithoutRecord(*entryToUpdate); - REQUIRE(static_cast(firstEntry)); + LedgerTxn ltx(app->getLedgerTxnRoot()); + auto firstEntry = ltx.loadWithoutRecord(*entryToUpdate); + REQUIRE(static_cast(firstEntry)); - auto evictedEntry = ltx.loadWithoutRecord(entryToEvict); - REQUIRE(!static_cast(evictedEntry)); + auto evictedEntry = ltx.loadWithoutRecord(entryToEvict); + REQUIRE(!static_cast(evictedEntry)); - REQUIRE(evictionIter.bucketFileOffset == - *expectedEndIterPosition); - REQUIRE(evictionIter.bucketListLevel == levelToScan); - REQUIRE(evictionIter.isCurrBucket == true); - } - } + REQUIRE(evictionIter.bucketFileOffset == *expectedEndIterPosition); + REQUIRE(evictionIter.bucketListLevel == levelToScan); + REQUIRE(evictionIter.isCurrBucket == true); + } - auto constexpr xdrOverheadBytes = 4; + auto constexpr xdrOverheadBytes = 4; - LiveBucketInputIterator metaIn(bl.getLevel(0).getCurr()); - BucketEntry be(METAENTRY); - be.metaEntry() = metaIn.getMetadata(); - auto const metadataSize = xdr::xdr_size(be) + xdrOverheadBytes; + LiveBucketInputIterator metaIn(bl.getLevel(0).getCurr()); + BucketEntry be(METAENTRY); + be.metaEntry() = metaIn.getMetadata(); + auto const metadataSize = xdr::xdr_size(be) + xdrOverheadBytes; - SECTION("evictionScanSize") - { - // Set smallest possible scan size so eviction iterator - // scans one entry per scan - stateArchivalSettings.evictionScanSize = 1; - stateArchivalSettings.startingEvictionScanLevel = levelToScan; - updateNetworkCfg(); + SECTION("evictionScanSize") + { + // Set smallest possible scan size so eviction iterator + // scans one entry per scan + stateArchivalSettings.evictionScanSize = 1; + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); - // First eviction scan 
will only read meta + // First eviction scan will only read meta + closeLedger(*app); + ++ledgerSeq; + + REQUIRE(evictionIter.bucketFileOffset == metadataSize); + REQUIRE(evictionIter.bucketListLevel == levelToScan); + REQUIRE(evictionIter.isCurrBucket == true); + + size_t prevOff = evictionIter.bucketFileOffset; + // Check that each scan only reads one entry + for (LiveBucketInputIterator in(bl.getLevel(levelToScan).getCurr()); + in; ++in) + { + auto startingOffset = evictionIter.bucketFileOffset; closeLedger(*app); ++ledgerSeq; - REQUIRE(evictionIter.bucketFileOffset == metadataSize); - REQUIRE(evictionIter.bucketListLevel == levelToScan); - REQUIRE(evictionIter.isCurrBucket == true); - - size_t prevOff = evictionIter.bucketFileOffset; - // Check that each scan only reads one entry - for (LiveBucketInputIterator in( - bl.getLevel(levelToScan).getCurr()); - in; ++in) + // If the BL receives an incoming merge, the scan will + // reset; break at that point. + if (evictionIter.bucketFileOffset < prevOff) { - auto startingOffset = evictionIter.bucketFileOffset; - closeLedger(*app); - ++ledgerSeq; - - // If the BL receives an incoming merge, the scan will - // reset; break at that point. 
- if (evictionIter.bucketFileOffset < prevOff) - { - break; - } - prevOff = evictionIter.bucketFileOffset; - REQUIRE(evictionIter.bucketFileOffset == - xdr::xdr_size(*in) + startingOffset + - xdrOverheadBytes); - REQUIRE(evictionIter.bucketListLevel == levelToScan); - REQUIRE(evictionIter.isCurrBucket == true); + break; } + prevOff = evictionIter.bucketFileOffset; + REQUIRE(evictionIter.bucketFileOffset == + xdr::xdr_size(*in) + startingOffset + xdrOverheadBytes); + REQUIRE(evictionIter.bucketListLevel == levelToScan); + REQUIRE(evictionIter.isCurrBucket == true); } + } - SECTION("scans across multiple buckets") + SECTION("scans across multiple buckets") + { + for (; bl.getLevel(2).getSnap()->getSize() < 1'000; ++ledgerSeq) { - for (; bl.getLevel(2).getSnap()->getSize() < 1'000; ++ledgerSeq) + lm.setNextLedgerEntryBatchForBucketTesting( + {}, + LedgerTestUtils::generateValidLedgerEntriesWithExclusions( + {CONFIG_SETTING, CONTRACT_DATA, CONTRACT_CODE}, 10), + {}); + closeLedger(*app); + } + + // Reset iterator to level 2 curr bucket that we just populated + stateArchivalSettings.startingEvictionScanLevel = 2; + + // Scan size should scan all of curr bucket and one entry in + // snap per scan + stateArchivalSettings.evictionScanSize = + bl.getLevel(2).getCurr()->getSize() + 1; + + // Reset iterator + evictionIter.bucketFileOffset = 0; + evictionIter.bucketListLevel = 2; + evictionIter.isCurrBucket = true; + updateNetworkCfg(); + + closeLedger(*app); + ++ledgerSeq; + + // Iter should have advanced to snap and read first entry only + REQUIRE(evictionIter.bucketFileOffset == metadataSize); + REQUIRE(evictionIter.bucketListLevel == 2); + REQUIRE(evictionIter.isCurrBucket == false); + } + + SECTION("iterator resets when bucket changes") + { + auto testIterReset = [&](bool isCurr) { + auto const levelToTest = 1; + auto bucket = [&]() { + return isCurr ? 
bl.getLevel(levelToTest).getCurr() + : bl.getLevel(levelToTest).getSnap(); + }; + + // Iterate until entries spill into level 1 bucket + for (; bucket()->getSize() < 1'000; ++ledgerSeq) { lm.setNextLedgerEntryBatchForBucketTesting( {}, @@ -1300,128 +1329,70 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") closeLedger(*app); } - // Reset iterator to level 2 curr bucket that we just populated - stateArchivalSettings.startingEvictionScanLevel = 2; - - // Scan size should scan all of curr bucket and one entry in - // snap per scan - stateArchivalSettings.evictionScanSize = - bl.getLevel(2).getCurr()->getSize() + 1; + // Scan meta entry + one other entry in initial scan + stateArchivalSettings.evictionScanSize = metadataSize + 1; - // Reset iterator + // Reset eviction iter start of bucket being tested + stateArchivalSettings.startingEvictionScanLevel = levelToTest; evictionIter.bucketFileOffset = 0; - evictionIter.bucketListLevel = 2; - evictionIter.isCurrBucket = true; + evictionIter.isCurrBucket = isCurr; + evictionIter.bucketListLevel = 1; updateNetworkCfg(); - closeLedger(*app); - ++ledgerSeq; - - // Iter should have advanced to snap and read first entry only - REQUIRE(evictionIter.bucketFileOffset == metadataSize); - REQUIRE(evictionIter.bucketListLevel == 2); - REQUIRE(evictionIter.isCurrBucket == false); - } - - SECTION("iterator resets when bucket changes") - { - auto testIterReset = [&](bool isCurr) { - auto const levelToTest = 1; - auto bucket = [&]() { - return isCurr ? 
bl.getLevel(levelToTest).getCurr() - : bl.getLevel(levelToTest).getSnap(); - }; - - // Iterate until entries spill into level 1 bucket - for (; bucket()->getSize() < 1'000; ++ledgerSeq) - { - lm.setNextLedgerEntryBatchForBucketTesting( - {}, - LedgerTestUtils:: - generateValidLedgerEntriesWithExclusions( - {CONFIG_SETTING, CONTRACT_DATA, - CONTRACT_CODE}, - 10), - {}); - closeLedger(*app); - } - - // Scan meta entry + one other entry in initial scan - stateArchivalSettings.evictionScanSize = metadataSize + 1; - - // Reset eviction iter start of bucket being tested - stateArchivalSettings.startingEvictionScanLevel = - levelToTest; - evictionIter.bucketFileOffset = 0; - evictionIter.isCurrBucket = isCurr; - evictionIter.bucketListLevel = 1; - updateNetworkCfg(); - - // Advance until one ledger before bucket is updated - auto ledgersUntilUpdate = - LiveBucketList::bucketUpdatePeriod(levelToTest, - isCurr) - - 1; // updateNetworkCfg closes a ledger that we need to - // count - for (uint32_t i = 0; i < ledgersUntilUpdate - 1; ++i) - { - auto startingIter = evictionIter; - closeLedger(*app); - ++ledgerSeq; - - // Check that iterator is making progress correctly - REQUIRE(evictionIter.bucketFileOffset > - startingIter.bucketFileOffset); - REQUIRE(evictionIter.bucketListLevel == levelToTest); - REQUIRE(evictionIter.isCurrBucket == isCurr); - } - - // Next ledger close should update bucket - auto startingHash = bucket()->getHash(); + // Advance until one ledger before bucket is updated + auto ledgersUntilUpdate = + LiveBucketList::bucketUpdatePeriod(levelToTest, + isCurr) - + 1; // updateNetworkCfg closes a ledger that we need to + // count + for (uint32_t i = 0; i < ledgersUntilUpdate - 1; ++i) + { + auto startingIter = evictionIter; closeLedger(*app); ++ledgerSeq; - // Check that bucket actually changed - REQUIRE(bucket()->getHash() != startingHash); + // Check that iterator is making progress correctly + REQUIRE(evictionIter.bucketFileOffset > + 
startingIter.bucketFileOffset); + REQUIRE(evictionIter.bucketListLevel == levelToTest); + REQUIRE(evictionIter.isCurrBucket == isCurr); + } - // The iterator retroactively checks if the Bucket has - // changed, so close one additional ledger to check if the - // iterator has reset - closeLedger(*app); - ++ledgerSeq; + // Next ledger close should update bucket + auto startingHash = bucket()->getHash(); + closeLedger(*app); + ++ledgerSeq; - LiveBucketInputIterator in(bucket()); + // Check that bucket actually changed + REQUIRE(bucket()->getHash() != startingHash); - // Check that iterator has reset to beginning of bucket and - // read meta entry + one additional entry - REQUIRE(evictionIter.bucketFileOffset == - metadataSize + xdr::xdr_size(*in) + - xdrOverheadBytes); - REQUIRE(evictionIter.bucketListLevel == levelToTest); - REQUIRE(evictionIter.isCurrBucket == isCurr); - }; + // The iterator retroactively checks if the Bucket has + // changed, so close one additional ledger to check if the + // iterator has reset + closeLedger(*app); + ++ledgerSeq; - SECTION("curr bucket") - { - testIterReset(true); - } + LiveBucketInputIterator in(bucket()); - SECTION("snap bucket") - { - testIterReset(false); - } + // Check that iterator has reset to beginning of bucket and + // read meta entry + one additional entry + REQUIRE(evictionIter.bucketFileOffset == + metadataSize + xdr::xdr_size(*in) + xdrOverheadBytes); + REQUIRE(evictionIter.bucketListLevel == levelToTest); + REQUIRE(evictionIter.isCurrBucket == isCurr); + }; + + SECTION("curr bucket") + { + testIterReset(true); } - }); - }; - SECTION("legacy scan") - { - test(/*backgroundScan=*/false); - } - SECTION("background scan") - { - test(/*backgroundScan=*/true); - } + SECTION("snap bucket") + { + testIterReset(false); + } + } + }); } TEST_CASE_VERSIONS("Searchable BucketListDB snapshots", "[bucketlist]") diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index 62fb33af2a..4e6b53016b 
100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -54,13 +54,9 @@ clearFutures(Application::pointer app, LiveBucketList& bl) size_t n = static_cast(app->getConfig().WORKER_THREADS); - // If background eviction is enabled, we have one fewer worker thread for - // bucket merges - if (app->getConfig().isUsingBackgroundEviction()) - { - releaseAssert(n != 0); - --n; - } + // Background eviction takes up one worker thread. + releaseAssert(n != 0); + --n; std::mutex mutex; std::condition_variable cv, cv2; diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp index ffecf52ef3..fe6a67aeb9 100644 --- a/src/bucket/test/BucketTestUtils.cpp +++ b/src/bucket/test/BucketTestUtils.cpp @@ -233,16 +233,8 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList( } LedgerTxn ltxEvictions(ltx); - if (mApp.getConfig().isUsingBackgroundEviction()) - { - mApp.getBucketManager().resolveBackgroundEvictionScan( - ltxEvictions, lh.ledgerSeq, keys); - } - else - { - mApp.getBucketManager().scanForEvictionLegacy(ltxEvictions, - lh.ledgerSeq); - } + mApp.getBucketManager().resolveBackgroundEvictionScan( + ltxEvictions, lh.ledgerSeq, keys); if (ledgerCloseMeta) { @@ -261,8 +253,7 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList( // Add dead entries from ltx to entries that will be added to BucketList // so we can test background eviction properly if (protocolVersionStartsFrom(initialLedgerVers, - SOROBAN_PROTOCOL_VERSION) && - mApp.getConfig().isUsingBackgroundEviction()) + SOROBAN_PROTOCOL_VERSION)) { for (auto const& k : dead) { diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index d72b4e616e..f0f762f213 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -1044,8 +1044,7 @@ LedgerManagerImpl::closeLedger(LedgerCloseData const& ledgerData) // step 5 if (protocolVersionStartsFrom(initialLedgerVers, - 
SOROBAN_PROTOCOL_VERSION) && - mApp.getConfig().isUsingBackgroundEviction()) + SOROBAN_PROTOCOL_VERSION)) { mApp.getBucketManager().startBackgroundEvictionScan(ledgerSeq + 1); } @@ -1686,17 +1685,8 @@ LedgerManagerImpl::transferLedgerEntriesToBucketList( { auto keys = ltx.getAllTTLKeysWithoutSealing(); LedgerTxn ltxEvictions(ltx); - - if (mApp.getConfig().isUsingBackgroundEviction()) - { - mApp.getBucketManager().resolveBackgroundEvictionScan( - ltxEvictions, lh.ledgerSeq, keys); - } - else - { - mApp.getBucketManager().scanForEvictionLegacy(ltxEvictions, - lh.ledgerSeq); - } + mApp.getBucketManager().resolveBackgroundEvictionScan( + ltxEvictions, lh.ledgerSeq, keys); if (ledgerCloseMeta) { diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp index 5be20c7342..9c6c636406 100644 --- a/src/main/ApplicationImpl.cpp +++ b/src/main/ApplicationImpl.cpp @@ -80,12 +80,8 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg) : mVirtualClock(clock) , mConfig(cfg) // Allocate one worker to eviction when background eviction enabled - , mWorkerIOContext(mConfig.isUsingBackgroundEviction() - ? mConfig.WORKER_THREADS - 1 - : mConfig.WORKER_THREADS) - , mEvictionIOContext(mConfig.isUsingBackgroundEviction() - ? 
std::make_unique(1) - : nullptr) + , mWorkerIOContext(mConfig.WORKER_THREADS - 1) + , mEvictionIOContext(std::make_unique(1)) , mWork(std::make_unique(mWorkerIOContext)) , mEvictionWork( mEvictionIOContext @@ -153,19 +149,16 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg) auto t = mConfig.WORKER_THREADS; LOG_DEBUG(DEFAULT_LOG, "Application constructing (worker threads: {})", t); - if (mConfig.isUsingBackgroundEviction()) - { - releaseAssert(mConfig.WORKER_THREADS > 0); - releaseAssert(mEvictionIOContext); + releaseAssert(mConfig.WORKER_THREADS > 0); + releaseAssert(mEvictionIOContext); - // Allocate one thread for Eviction scan - mEvictionThread = std::thread{[this]() { - runCurrentThreadWithMediumPriority(); - mEvictionIOContext->run(); - }}; + // Allocate one thread for Eviction scan + mEvictionThread = std::thread{[this]() { + runCurrentThreadWithMediumPriority(); + mEvictionIOContext->run(); + }}; - --t; - } + --t; while (t--) { @@ -802,24 +795,6 @@ ApplicationImpl::validateAndLogConfig() } } - if (mConfig.BACKGROUND_EVICTION_SCAN) - { - if (!mConfig.isUsingBucketListDB()) - { - throw std::invalid_argument( - "BACKGROUND_EVICTION_SCAN set to true but " - "DEPRECATED_SQL_LEDGER_STATE is set to true. 
" - "DEPRECATED_SQL_LEDGER_STATE must be set to false to enable " - "background eviction."); - } - - if (mConfig.WORKER_THREADS < 2) - { - throw std::invalid_argument("BACKGROUND_EVICTION_SCAN requires " - "WORKER_THREADS > 1"); - } - } - if (mConfig.HTTP_QUERY_PORT != 0) { if (isNetworkedValidator) diff --git a/src/main/Config.cpp b/src/main/Config.cpp index f8980fd2d6..7d290a008d 100644 --- a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -161,7 +161,6 @@ Config::Config() : NODE_SEED(SecretKey::random()) BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 14; // 2^14 == 16 kb BUCKETLIST_DB_INDEX_CUTOFF = 20; // 20 mb BUCKETLIST_DB_PERSIST_INDEX = true; - BACKGROUND_EVICTION_SCAN = true; PUBLISH_TO_ARCHIVE_DELAY = std::chrono::seconds{0}; // automatic maintenance settings: // short and prime with 1 hour which will cause automatic maintenance to @@ -1066,16 +1065,21 @@ Config::processConfig(std::shared_ptr t) }}, {"BACKGROUND_OVERLAY_PROCESSING", [&]() { BACKGROUND_OVERLAY_PROCESSING = readBool(item); }}, + // TODO: Flags are no longer supported, remove in next release. {"BACKGROUND_EVICTION_SCAN", - [&]() { BACKGROUND_EVICTION_SCAN = readBool(item); }}, - // TODO: Flag is no longer supported, remove in next release. + [&]() { + CLOG_WARNING( + Bucket, + "BACKGROUND_EVICTION_SCAN is deprecated and ignored. " + "Please remove this from config"); + }}, {"EXPERIMENTAL_BACKGROUND_EVICTION_SCAN", [&]() { CLOG_WARNING( Bucket, "EXPERIMENTAL_BACKGROUND_EVICTION_SCAN is deprecated " "and " - "is ignored. Use BACKGROUND_EVICTION_SCAN instead"); + "is ignored. 
Please remove from config"); }}, {"DEPRECATED_SQL_LEDGER_STATE", [&]() { DEPRECATED_SQL_LEDGER_STATE = readBool(item); }}, @@ -1867,14 +1871,6 @@ Config::processConfig(std::shared_ptr t) throw std::runtime_error(msg); } - // If DEPRECATED_SQL_LEDGER_STATE is set to false and - // BACKGROUND_EVICTION_SCAN is not set, override default value to false - // so that nodes still running SQL ledger don't crash on startup - if (!isUsingBucketListDB() && !t->contains("BACKGROUND_EVICTION_SCAN")) - { - BACKGROUND_EVICTION_SCAN = false; - } - // process elements that potentially depend on others if (t->contains("VALIDATORS")) { @@ -2398,12 +2394,6 @@ Config::isUsingBucketListDB() const && MODE_ENABLES_BUCKETLIST; } -bool -Config::isUsingBackgroundEviction() const -{ - return isUsingBucketListDB() && BACKGROUND_EVICTION_SCAN; -} - bool Config::isPersistingBucketListDBIndexes() const { diff --git a/src/main/Config.h b/src/main/Config.h index 7ca4c082a4..6c0e3dfc5f 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -469,10 +469,6 @@ class Config : public std::enable_shared_from_this // persisted. bool BUCKETLIST_DB_PERSIST_INDEX; - // When set to true, eviction scans occur on the background thread, - // increasing performance. Requires EXPERIMENTAL_BUCKETLIST_DB. 
- bool BACKGROUND_EVICTION_SCAN; - // A config parameter that stores historical data, such as transactions, // fees, and scp history in the database bool MODE_STORES_HISTORY_MISC; diff --git a/src/test/FuzzerImpl.cpp b/src/test/FuzzerImpl.cpp index 23e4c400df..afa750f206 100644 --- a/src/test/FuzzerImpl.cpp +++ b/src/test/FuzzerImpl.cpp @@ -864,7 +864,6 @@ getFuzzConfig(int instanceNumber) Config cfg = getTestConfig(instanceNumber); cfg.MANUAL_CLOSE = true; cfg.CATCHUP_COMPLETE = false; - cfg.BACKGROUND_EVICTION_SCAN = false; cfg.CATCHUP_RECENT = 0; cfg.ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING = false; cfg.ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING = UINT32_MAX; diff --git a/src/test/test.cpp b/src/test/test.cpp index 4499c26a7e..d7ea4668e5 100644 --- a/src/test/test.cpp +++ b/src/test/test.cpp @@ -285,7 +285,6 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode) case Config::TESTDB_BUCKET_DB_VOLATILE: case Config::TESTDB_IN_MEMORY: dbname << "sqlite3://:memory:"; - thisConfig.BACKGROUND_EVICTION_SCAN = false; break; case Config::TESTDB_BUCKET_DB_PERSISTENT: dbname << "sqlite3://" << rootDir << "test.db"; @@ -295,7 +294,6 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode) case Config::TESTDB_POSTGRESQL: dbname << "postgresql://dbname=test" << instanceNumber; thisConfig.DISABLE_XDR_FSYNC = false; - thisConfig.BACKGROUND_EVICTION_SCAN = false; break; #endif default: diff --git a/src/transactions/test/InvokeHostFunctionTests.cpp b/src/transactions/test/InvokeHostFunctionTests.cpp index c0c980819f..ee9189b437 100644 --- a/src/transactions/test/InvokeHostFunctionTests.cpp +++ b/src/transactions/test/InvokeHostFunctionTests.cpp @@ -2559,149 +2559,113 @@ TEST_CASE("charge rent fees for storage resize", "[tx][soroban]") TEST_CASE("temp entry eviction", "[tx][soroban]") { - auto test = [](bool enableBucketListDB, bool backgroundEviction) { - if (backgroundEviction && !enableBucketListDB) - { - throw "testing error: backgroundEviction requires " - 
"enableBucketListDB == true"; - } - - Config cfg = getTestConfig(); - TmpDirManager tdm(std::string("soroban-storage-meta-") + - binToHex(randomBytes(8))); - TmpDir td = tdm.tmpDir("soroban-meta-ok"); - std::string metaPath = td.getName() + "/stream.xdr"; - - cfg.METADATA_OUTPUT_STREAM = metaPath; - cfg.DEPRECATED_SQL_LEDGER_STATE = !enableBucketListDB; - cfg.BACKGROUND_EVICTION_SCAN = backgroundEviction; - - // overrideSorobanNetworkConfigForTest commits directly to the - // database, will not work if BucketListDB is enabled so we must use - // the cfg override - if (enableBucketListDB) - { - cfg.TESTING_SOROBAN_HIGH_LIMIT_OVERRIDE = true; - } - - SorobanTest test(cfg); - ContractStorageTestClient client(test); - auto const& contractKeys = client.getContract().getKeys(); - // Extend Wasm and instance - test.invokeExtendOp(contractKeys, 10'000); + Config cfg = getTestConfig(); + TmpDirManager tdm(std::string("soroban-storage-meta-") + + binToHex(randomBytes(8))); + TmpDir td = tdm.tmpDir("soroban-meta-ok"); + std::string metaPath = td.getName() + "/stream.xdr"; - auto invocation = client.getContract().prepareInvocation( - "put_temporary", {makeSymbolSCVal("key"), makeU64SCVal(123)}, - client.writeKeySpec("key", ContractDataDurability::TEMPORARY)); - REQUIRE(invocation.withExactNonRefundableResourceFee().invoke()); - auto lk = client.getContract().getDataKey( - makeSymbolSCVal("key"), ContractDataDurability::TEMPORARY); + cfg.METADATA_OUTPUT_STREAM = metaPath; - auto expectedLiveUntilLedger = - test.getLCLSeq() + - test.getNetworkCfg().stateArchivalSettings().minTemporaryTTL - 1; - REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger); - auto const evictionLedger = 4097; + SorobanTest test(cfg); + ContractStorageTestClient client(test); + auto const& contractKeys = client.getContract().getKeys(); + + // Extend Wasm and instance + test.invokeExtendOp(contractKeys, 10'000); + + auto invocation = client.getContract().prepareInvocation( + "put_temporary", 
{makeSymbolSCVal("key"), makeU64SCVal(123)}, + client.writeKeySpec("key", ContractDataDurability::TEMPORARY)); + REQUIRE(invocation.withExactNonRefundableResourceFee().invoke()); + auto lk = client.getContract().getDataKey( + makeSymbolSCVal("key"), ContractDataDurability::TEMPORARY); + + auto expectedLiveUntilLedger = + test.getLCLSeq() + + test.getNetworkCfg().stateArchivalSettings().minTemporaryTTL - 1; + REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger); + auto const evictionLedger = 4097; + + // Close ledgers until temp entry is evicted + for (uint32_t i = test.getLCLSeq(); i < evictionLedger - 2; ++i) + { + closeLedgerOn(test.getApp(), i, 2, 1, 2016); + } - // Close ledgers until temp entry is evicted - for (uint32_t i = test.getLCLSeq(); i < evictionLedger - 2; ++i) - { - closeLedgerOn(test.getApp(), i, 2, 1, 2016); - } + REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger); - REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger); + // This should be a noop + test.invokeExtendOp({lk}, 10'000, 0); + REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger); - // This should be a noop - test.invokeExtendOp({lk}, 10'000, 0); - REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger); + // This will fail because the entry is expired + REQUIRE(client.extend("key", ContractDataDurability::TEMPORARY, 10'000, + 10'000) == INVOKE_HOST_FUNCTION_TRAPPED); + REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger); - // This will fail because the entry is expired - REQUIRE(client.extend("key", ContractDataDurability::TEMPORARY, 10'000, - 10'000) == INVOKE_HOST_FUNCTION_TRAPPED); - REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger); + REQUIRE(!test.isEntryLive(lk, test.getLCLSeq())); - REQUIRE(!test.isEntryLive(lk, test.getLCLSeq())); + SECTION("eviction") + { + // close one more ledger to trigger the eviction + closeLedgerOn(test.getApp(), evictionLedger, 2, 1, 2016); - SECTION("eviction") { - // close one more ledger to trigger the eviction - closeLedgerOn(test.getApp(), 
evictionLedger, 2, 1, 2016); - - { - LedgerTxn ltx(test.getApp().getLedgerTxnRoot()); - REQUIRE(!ltx.load(lk)); - } - - XDRInputFileStream in; - in.open(metaPath); - LedgerCloseMeta lcm; - bool evicted = false; - while (in.readOne(lcm)) - { - REQUIRE(lcm.v() == 1); - if (lcm.v1().ledgerHeader.header.ledgerSeq == evictionLedger) - { - REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.size() == 2); - auto sortedKeys = lcm.v1().evictedTemporaryLedgerKeys; - std::sort(sortedKeys.begin(), sortedKeys.end()); - REQUIRE(sortedKeys[0] == lk); - REQUIRE(sortedKeys[1] == getTTLKey(lk)); - evicted = true; - } - else - { - REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.empty()); - } - } - - REQUIRE(evicted); + LedgerTxn ltx(test.getApp().getLedgerTxnRoot()); + REQUIRE(!ltx.load(lk)); } - SECTION( - "Create temp entry with same key as an expired entry on eviction " - "ledger") + XDRInputFileStream in; + in.open(metaPath); + LedgerCloseMeta lcm; + bool evicted = false; + while (in.readOne(lcm)) { - REQUIRE(client.put("key", ContractDataDurability::TEMPORARY, 234) == - INVOKE_HOST_FUNCTION_SUCCESS); + REQUIRE(lcm.v() == 1); + if (lcm.v1().ledgerHeader.header.ledgerSeq == evictionLedger) { - LedgerTxn ltx(test.getApp().getLedgerTxnRoot()); - REQUIRE(ltx.load(lk)); + REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.size() == 2); + auto sortedKeys = lcm.v1().evictedTemporaryLedgerKeys; + std::sort(sortedKeys.begin(), sortedKeys.end()); + REQUIRE(sortedKeys[0] == lk); + REQUIRE(sortedKeys[1] == getTTLKey(lk)); + evicted = true; } - - // Verify that we're on the ledger where the entry would get evicted - // it wasn't recreated. 
- REQUIRE(test.getLCLSeq() == evictionLedger); - - // Entry is live again - REQUIRE(test.isEntryLive(lk, test.getLCLSeq())); - - // Verify that we didn't emit an eviction - XDRInputFileStream in; - in.open(metaPath); - LedgerCloseMeta lcm; - while (in.readOne(lcm)) + else { REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.empty()); } } - }; - SECTION("sql") - { - test(/*enableBucketListDB=*/false, /*backgroundEviction=*/false); + REQUIRE(evicted); } - SECTION("BucketListDB") + SECTION("Create temp entry with same key as an expired entry on eviction " + "ledger") { - SECTION("legacy main thread scan") + REQUIRE(client.put("key", ContractDataDurability::TEMPORARY, 234) == + INVOKE_HOST_FUNCTION_SUCCESS); { - test(/*enableBucketListDB=*/true, /*backgroundEviction=*/false); + LedgerTxn ltx(test.getApp().getLedgerTxnRoot()); + REQUIRE(ltx.load(lk)); } - SECTION("background scan") + // Verify that we're on the ledger where the entry would get evicted + // it wasn't recreated. + REQUIRE(test.getLCLSeq() == evictionLedger); + + // Entry is live again + REQUIRE(test.isEntryLive(lk, test.getLCLSeq())); + + // Verify that we didn't emit an eviction + XDRInputFileStream in; + in.open(metaPath); + LedgerCloseMeta lcm; + while (in.readOne(lcm)) { - test(/*enableBucketListDB=*/true, /*backgroundEviction=*/true); + REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.empty()); } } } From eaf955825a8e2480e9cc979c6486d3a2da43d524 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Tue, 8 Oct 2024 15:42:53 -0700 Subject: [PATCH 07/17] Remove DEPRECATED_SQL_LEDGER_STATE flag --- docs/stellar-core_example.cfg | 8 - docs/stellar-core_example_validators.cfg | 1 - docs/stellar-core_standalone.cfg | 1 - docs/stellar-core_testnet.cfg | 1 - docs/stellar-core_testnet_legacy.cfg | 1 - docs/stellar-core_testnet_validator.cfg | 1 - src/bucket/BucketApplicator.cpp | 118 ++--- src/bucket/BucketApplicator.h | 2 - src/bucket/BucketBase.cpp | 3 +- src/bucket/BucketIndexImpl.cpp | 5 +- src/bucket/BucketManager.cpp | 
47 +- src/bucket/BucketOutputIterator.cpp | 17 +- src/bucket/BucketOutputIterator.h | 1 - src/bucket/HotArchiveBucket.cpp | 3 +- src/bucket/LiveBucket.cpp | 15 +- src/bucket/readme.md | 2 - src/bucket/test/BucketIndexTests.cpp | 4 - src/bucket/test/BucketListTests.cpp | 5 - src/bucket/test/BucketManagerTests.cpp | 1 - src/bucket/test/BucketTestUtils.cpp | 35 +- src/catchup/ApplyBucketsWork.cpp | 135 ++---- src/catchup/ApplyBucketsWork.h | 7 - src/catchup/AssumeStateWork.cpp | 5 +- src/catchup/CatchupWork.cpp | 26 +- src/catchup/IndexBucketsWork.cpp | 2 +- src/database/Database.cpp | 9 +- .../BucketListIsConsistentWithDatabase.cpp | 3 +- ...ucketListIsConsistentWithDatabaseTests.cpp | 6 +- src/ledger/LedgerManagerImpl.cpp | 76 ++-- src/ledger/LedgerStateSnapshot.cpp | 8 +- src/ledger/LedgerTxn.cpp | 410 ++---------------- src/ledger/LedgerTxn.h | 24 +- src/ledger/LedgerTxnImpl.h | 132 +----- src/ledger/NetworkConfig.cpp | 2 +- src/ledger/test/InMemoryLedgerTxn.cpp | 15 +- src/ledger/test/InMemoryLedgerTxn.h | 9 +- src/ledger/test/InMemoryLedgerTxnRoot.cpp | 11 +- src/ledger/test/InMemoryLedgerTxnRoot.h | 6 +- src/main/ApplicationImpl.cpp | 209 ++------- src/main/ApplicationUtils.cpp | 11 +- src/main/ApplicationUtils.h | 2 - src/main/CommandHandler.cpp | 3 +- src/main/CommandLine.cpp | 10 +- src/main/Config.cpp | 57 +-- src/main/Config.h | 6 - src/main/PersistentState.cpp | 20 +- src/main/PersistentState.h | 6 +- src/main/test/ConfigTests.cpp | 6 +- src/simulation/test/LoadGeneratorTests.cpp | 3 - src/test/TestUtils.cpp | 2 +- src/test/TxTests.cpp | 10 +- 51 files changed, 292 insertions(+), 1210 deletions(-) diff --git a/docs/stellar-core_example.cfg b/docs/stellar-core_example.cfg index 3669b85acd..d98a0f8fe5 100644 --- a/docs/stellar-core_example.cfg +++ b/docs/stellar-core_example.cfg @@ -229,14 +229,6 @@ FLOOD_DEMAND_BACKOFF_DELAY_MS = 500 # against each other. 
MAX_DEX_TX_OPERATIONS_IN_TX_SET = 0 -# DEPRECATED_SQL_LEDGER_STATE (bool) default false -# When set to true, SQL is used to store all ledger state instead of -# BucketListDB. This is not recommended and may cause performance degregradation. -# This is deprecated and will be removed in the future. Note that offers table -# is still maintained in SQL when this is set to false, but all other ledger -# state tables are dropped. -DEPRECATED_SQL_LEDGER_STATE = false - # BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT (Integer) default 14 # Determines page size used by BucketListDB for range indexes, where # pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT. If set to diff --git a/docs/stellar-core_example_validators.cfg b/docs/stellar-core_example_validators.cfg index 10d6ced3ee..a1203e1047 100644 --- a/docs/stellar-core_example_validators.cfg +++ b/docs/stellar-core_example_validators.cfg @@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false NETWORK_PASSPHRASE="Example configuration" DATABASE="sqlite3://example.db" -DEPRECATED_SQL_LEDGER_STATE = false NODE_SEED="SA7FGJMMUIHNE3ZPI2UO5I632A7O5FBAZTXFAIEVFA4DSSGLHXACLAIT a3" NODE_HOME_DOMAIN="domainA" diff --git a/docs/stellar-core_standalone.cfg b/docs/stellar-core_standalone.cfg index b9fd80a509..858e97d002 100644 --- a/docs/stellar-core_standalone.cfg +++ b/docs/stellar-core_standalone.cfg @@ -12,7 +12,6 @@ NODE_IS_VALIDATOR=true #DATABASE="postgresql://dbname=stellar user=postgres password=password host=localhost" DATABASE="sqlite3://stellar.db" -DEPRECATED_SQL_LEDGER_STATE = false COMMANDS=["ll?level=debug"] diff --git a/docs/stellar-core_testnet.cfg b/docs/stellar-core_testnet.cfg index 77c834eb62..981105b7a6 100644 --- a/docs/stellar-core_testnet.cfg +++ b/docs/stellar-core_testnet.cfg @@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false NETWORK_PASSPHRASE="Test SDF Network ; September 2015" DATABASE="sqlite3://stellar.db" -DEPRECATED_SQL_LEDGER_STATE = false # Stellar Testnet validators [[HOME_DOMAINS]] diff --git 
a/docs/stellar-core_testnet_legacy.cfg b/docs/stellar-core_testnet_legacy.cfg index 946e7c8bc9..0ff9909c9f 100644 --- a/docs/stellar-core_testnet_legacy.cfg +++ b/docs/stellar-core_testnet_legacy.cfg @@ -9,7 +9,6 @@ KNOWN_PEERS=[ "core-testnet3.stellar.org"] DATABASE="sqlite3://stellar.db" -DEPRECATED_SQL_LEDGER_STATE = false UNSAFE_QUORUM=true FAILURE_SAFETY=1 diff --git a/docs/stellar-core_testnet_validator.cfg b/docs/stellar-core_testnet_validator.cfg index c5d5768e87..fa329c0c43 100644 --- a/docs/stellar-core_testnet_validator.cfg +++ b/docs/stellar-core_testnet_validator.cfg @@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false NETWORK_PASSPHRASE="Test SDF Network ; September 2015" DATABASE="sqlite3://stellar.db" -DEPRECATED_SQL_LEDGER_STATE = false # Configuring the node as a validator # note that this is an unsafe configuration in this particular setup: diff --git a/src/bucket/BucketApplicator.cpp b/src/bucket/BucketApplicator.cpp index 4bcbf213ea..91f86531c6 100644 --- a/src/bucket/BucketApplicator.cpp +++ b/src/bucket/BucketApplicator.cpp @@ -9,6 +9,7 @@ #include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnEntry.h" #include "main/Application.h" +#include "util/GlobalChecks.h" #include "util/Logging.h" #include "util/types.h" #include @@ -21,14 +22,12 @@ BucketApplicator::BucketApplicator(Application& app, uint32_t minProtocolVersionSeen, uint32_t level, std::shared_ptr bucket, - std::function filter, std::unordered_set& seenKeys) : mApp(app) , mMaxProtocolVersion(maxProtocolVersion) , mMinProtocolVersionSeen(minProtocolVersionSeen) , mLevel(level) , mBucketIter(bucket) - , mEntryTypeFilter(filter) , mSeenKeys(seenKeys) { auto protocolVersion = mBucketIter.getMetadata().ledgerVersion; @@ -40,8 +39,8 @@ BucketApplicator::BucketApplicator(Application& app, protocolVersion, mMaxProtocolVersion)); } - // Only apply offers if BucketListDB is enabled - if (mApp.getConfig().isUsingBucketListDB() && !bucket->isEmpty()) + // Only apply offers + if (!bucket->isEmpty()) { auto 
offsetOp = bucket->getOfferRange(); if (offsetOp) @@ -62,10 +61,8 @@ BucketApplicator::operator bool() const { // There is more work to do (i.e. (bool) *this == true) iff: // 1. The underlying bucket iterator is not EOF and - // 2. Either BucketListDB is not enabled (so we must apply all entry types) - // or BucketListDB is enabled and we have offers still remaining. - return static_cast(mBucketIter) && - (!mApp.getConfig().isUsingBucketListDB() || mOffersRemaining); + // 2. We have offers still remaining. + return static_cast(mBucketIter) && mOffersRemaining; } size_t @@ -81,12 +78,11 @@ BucketApplicator::size() const } static bool -shouldApplyEntry(std::function const& filter, - BucketEntry const& e) +shouldApplyEntry(BucketEntry const& e) { if (e.type() == LIVEENTRY || e.type() == INITENTRY) { - return filter(e.liveEntry().data.type()); + return BucketIndex::typeNotSupported(e.liveEntry().data.type()); } if (e.type() != DEADENTRY) @@ -94,7 +90,7 @@ shouldApplyEntry(std::function const& filter, throw std::runtime_error( "Malformed bucket: unexpected non-INIT/LIVE/DEAD entry."); } - return filter(e.deadEntry().type()); + return BucketIndex::typeNotSupported(e.deadEntry().type()); } size_t @@ -129,8 +125,7 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) // returns the file offset at the end of the currently loaded entry. // This means we must read until pos is strictly greater than the upper // bound so that we don't skip the last offer in the range. 
- auto isUsingBucketListDB = mApp.getConfig().isUsingBucketListDB(); - if (isUsingBucketListDB && mBucketIter.pos() > mUpperBoundOffset) + if (mBucketIter.pos() > mUpperBoundOffset) { mOffersRemaining = false; break; @@ -139,89 +134,64 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) BucketEntry const& e = *mBucketIter; LiveBucket::checkProtocolLegality(e, mMaxProtocolVersion); - if (shouldApplyEntry(mEntryTypeFilter, e)) + if (shouldApplyEntry(e)) { - if (isUsingBucketListDB) + if (e.type() == LIVEENTRY || e.type() == INITENTRY) { - if (e.type() == LIVEENTRY || e.type() == INITENTRY) - { - auto [_, wasInserted] = - mSeenKeys.emplace(LedgerEntryKey(e.liveEntry())); + auto [_, wasInserted] = + mSeenKeys.emplace(LedgerEntryKey(e.liveEntry())); - // Skip seen keys - if (!wasInserted) - { - continue; - } - } - else + // Skip seen keys + if (!wasInserted) { - // Only apply INIT and LIVE entries - mSeenKeys.emplace(e.deadEntry()); continue; } } + else + { + // Only apply INIT and LIVE entries + mSeenKeys.emplace(e.deadEntry()); + continue; + } counters.mark(e); - if (e.type() == LIVEENTRY || e.type() == INITENTRY) + // DEAD and META entries skipped + releaseAssert(e.type() == LIVEENTRY || e.type() == INITENTRY); + // The last level can have live entries, but at that point we + // know that they are actually init entries because the earliest + // state of all entries is init, so we mark them as such here + if (mLevel == LiveBucketList::kNumLevels - 1 && + e.type() == LIVEENTRY) { - // The last level can have live entries, but at that point we - // know that they are actually init entries because the earliest - // state of all entries is init, so we mark them as such here - if (mLevel == LiveBucketList::kNumLevels - 1 && - e.type() == LIVEENTRY) - { - ltx->createWithoutLoading(e.liveEntry()); - } - else if ( - protocolVersionIsBefore( - mMinProtocolVersionSeen, - LiveBucket:: - FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + 
ltx->createWithoutLoading(e.liveEntry()); + } + else if (protocolVersionIsBefore( + mMinProtocolVersionSeen, + LiveBucket:: + FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + { + // Prior to protocol 11, INITENTRY didn't exist, so we need + // to check ltx to see if this is an update or a create + auto key = InternalLedgerEntry(e.liveEntry()).toKey(); + if (ltx->getNewestVersion(key)) { - // Prior to protocol 11, INITENTRY didn't exist, so we need - // to check ltx to see if this is an update or a create - auto key = InternalLedgerEntry(e.liveEntry()).toKey(); - if (ltx->getNewestVersion(key)) - { - ltx->updateWithoutLoading(e.liveEntry()); - } - else - { - ltx->createWithoutLoading(e.liveEntry()); - } + ltx->updateWithoutLoading(e.liveEntry()); } else { - if (e.type() == LIVEENTRY) - { - ltx->updateWithoutLoading(e.liveEntry()); - } - else - { - ltx->createWithoutLoading(e.liveEntry()); - } + ltx->createWithoutLoading(e.liveEntry()); } } else { - releaseAssertOrThrow(!isUsingBucketListDB); - if (protocolVersionIsBefore( - mMinProtocolVersionSeen, - LiveBucket:: - FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + if (e.type() == LIVEENTRY) { - // Prior to protocol 11, DEAD entries could exist - // without LIVE entries in between - if (ltx->getNewestVersion(e.deadEntry())) - { - ltx->eraseWithoutLoading(e.deadEntry()); - } + ltx->updateWithoutLoading(e.liveEntry()); } else { - ltx->eraseWithoutLoading(e.deadEntry()); + ltx->createWithoutLoading(e.liveEntry()); } } diff --git a/src/bucket/BucketApplicator.h b/src/bucket/BucketApplicator.h index 7495d35a02..9fa3217ba4 100644 --- a/src/bucket/BucketApplicator.h +++ b/src/bucket/BucketApplicator.h @@ -26,7 +26,6 @@ class BucketApplicator uint32_t mLevel; LiveBucketInputIterator mBucketIter; size_t mCount{0}; - std::function mEntryTypeFilter; std::unordered_set& mSeenKeys; std::streamoff mUpperBoundOffset{0}; bool mOffersRemaining{true}; @@ -73,7 +72,6 @@ class BucketApplicator BucketApplicator(Application& app, 
uint32_t maxProtocolVersion, uint32_t minProtocolVersionSeen, uint32_t level, std::shared_ptr bucket, - std::function filter, std::unordered_set& seenKeys); operator bool() const; size_t advance(Counters& counters); diff --git a/src/bucket/BucketBase.cpp b/src/bucket/BucketBase.cpp index becc41d501..0917b20a82 100644 --- a/src/bucket/BucketBase.cpp +++ b/src/bucket/BucketBase.cpp @@ -397,8 +397,7 @@ BucketBase::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, MergeKey mk{keepTombstoneEntries, oldBucket->getHash(), newBucket->getHash(), shadowHashes}; - return out.getBucket(bucketManager, - bucketManager.getConfig().isUsingBucketListDB(), &mk); + return out.getBucket(bucketManager, &mk); } template std::shared_ptr BucketBase::merge( diff --git a/src/bucket/BucketIndexImpl.cpp b/src/bucket/BucketIndexImpl.cpp index 38de02875c..c402be8408 100644 --- a/src/bucket/BucketIndexImpl.cpp +++ b/src/bucket/BucketIndexImpl.cpp @@ -239,7 +239,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, ZoneValue(static_cast(count)); } - if (bm.getConfig().isPersistingBucketListDBIndexes()) + if (bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX) { saveToDisk(bm, hash, ctx); } @@ -260,7 +260,7 @@ BucketIndexImpl::saveToDisk( BucketManager& bm, Hash const& hash, asio::io_context& ctx) const { ZoneScoped; - releaseAssert(bm.getConfig().isPersistingBucketListDBIndexes()); + releaseAssert(bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX); auto timer = LogSlowExecution("Saving index", LogSlowExecution::Mode::AUTOMATIC_RAII, "took", std::chrono::milliseconds(100)); @@ -381,7 +381,6 @@ BucketIndex::createIndex(BucketManager& bm, ZoneScoped; auto const& cfg = bm.getConfig(); - releaseAssertOrThrow(cfg.isUsingBucketListDB()); releaseAssertOrThrow(!filename.empty()); auto pageSize = effectivePageSize(cfg, fs::size(filename.string())); diff --git a/src/bucket/BucketManager.cpp b/src/bucket/BucketManager.cpp index b68600ac80..59f6980b00 100644 --- a/src/bucket/BucketManager.cpp +++ 
b/src/bucket/BucketManager.cpp @@ -97,17 +97,13 @@ BucketManager::initialize() { mLiveBucketList = std::make_unique(); mHotArchiveBucketList = std::make_unique(); - - if (mConfig.isUsingBucketListDB()) - { - mSnapshotManager = std::make_unique( - mApp, - std::make_unique>( - *mLiveBucketList, LedgerHeader()), - std::make_unique>( - *mHotArchiveBucketList, LedgerHeader()), - mConfig.QUERY_SNAPSHOT_LEDGERS); - } + mSnapshotManager = std::make_unique( + mApp, + std::make_unique>(*mLiveBucketList, + LedgerHeader()), + std::make_unique>( + *mHotArchiveBucketList, LedgerHeader()), + mConfig.QUERY_SNAPSHOT_LEDGERS); } // Create persistent publish directories @@ -321,7 +317,6 @@ BucketManager::getHotArchiveBucketList() BucketSnapshotManager& BucketManager::getBucketSnapshotManager() const { - releaseAssertOrThrow(mConfig.isUsingBucketListDB()); releaseAssert(mSnapshotManager); return *mSnapshotManager; } @@ -942,11 +937,7 @@ BucketManager::addLiveBatch(Application& app, LedgerHeader header, mLiveBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries, liveEntries, deadEntries); mLiveBucketListSizeCounter.set_count(mLiveBucketList->getSize()); - - if (app.getConfig().isUsingBucketListDB()) - { - reportBucketEntryCountMetrics(); - } + reportBucketEntryCountMetrics(); } void @@ -1059,7 +1050,6 @@ BucketManager::maybeSetIndex(std::shared_ptr b, void BucketManager::startBackgroundEvictionScan(uint32_t ledgerSeq) { - releaseAssert(mConfig.isUsingBucketListDB()); releaseAssert(mSnapshotManager); releaseAssert(!mEvictionFuture.valid()); releaseAssert(mEvictionStatistics); @@ -1249,16 +1239,12 @@ BucketManager::assumeState(HistoryArchiveState const& has, } } - // Buckets on the BucketList should always be indexed when - // BucketListDB enabled - if (mConfig.isUsingBucketListDB()) + // Buckets on the BucketList should always be indexed + releaseAssert(curr->isEmpty() || curr->isIndexed()); + releaseAssert(snap->isEmpty() || snap->isIndexed()); + if (nextBucket) 
{ - releaseAssert(curr->isEmpty() || curr->isIndexed()); - releaseAssert(snap->isEmpty() || snap->isIndexed()); - if (nextBucket) - { - releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed()); - } + releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed()); } mLiveBucketList->getLevel(i).setCurr(curr); @@ -1387,7 +1373,7 @@ BucketManager::mergeBuckets(HistoryArchiveState const& has) be.liveEntry() = pair.second; out.put(be); } - return out.getBucket(*this, /*shouldSynchronouslyIndex=*/false); + return out.getBucket(*this); } static bool @@ -1597,7 +1583,6 @@ BucketManager::getConfig() const std::shared_ptr BucketManager::getSearchableLiveBucketListSnapshot() { - releaseAssert(mConfig.isUsingBucketListDB()); // Any other threads must maintain their own snapshot releaseAssert(threadIsMain()); if (!mSearchableBucketListSnapshot) @@ -1612,10 +1597,6 @@ BucketManager::getSearchableLiveBucketListSnapshot() void BucketManager::reportBucketEntryCountMetrics() { - if (!mConfig.isUsingBucketListDB()) - { - return; - } auto bucketEntryCounters = mLiveBucketList->sumBucketEntryCounters(); for (auto [type, count] : bucketEntryCounters.entryTypeCounts) { diff --git a/src/bucket/BucketOutputIterator.cpp b/src/bucket/BucketOutputIterator.cpp index dbc9e7c159..fa45db495c 100644 --- a/src/bucket/BucketOutputIterator.cpp +++ b/src/bucket/BucketOutputIterator.cpp @@ -165,7 +165,6 @@ BucketOutputIterator::put(typename BucketT::EntryT const& e) template std::shared_ptr BucketOutputIterator::getBucket(BucketManager& bucketManager, - bool shouldSynchronouslyIndex, MergeKey* mergeKey) { ZoneScoped; @@ -193,17 +192,13 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, auto hash = mHasher.finish(); std::unique_ptr index{}; - // If this bucket needs to be indexed and is not already indexed - if (shouldSynchronouslyIndex) + // either it's a new bucket or we just reconstructed a bucket + // we already have, in any case ensure we have an index + if (auto b = 
bucketManager.getBucketIfExists(hash); + !b || !b->isIndexed()) { - // either it's a new bucket or we just reconstructed a bucket - // we already have, in any case ensure we have an index - if (auto b = bucketManager.getBucketIfExists(hash); - !b || !b->isIndexed()) - { - index = BucketIndex::createIndex(bucketManager, mFilename, - hash, mCtx); - } + index = BucketIndex::createIndex(bucketManager, mFilename, + hash, mCtx); } return bucketManager.adoptFileAsBucket(mFilename.string(), hash, diff --git a/src/bucket/BucketOutputIterator.h b/src/bucket/BucketOutputIterator.h index 9e72bebacf..f3baaf8a7f 100644 --- a/src/bucket/BucketOutputIterator.h +++ b/src/bucket/BucketOutputIterator.h @@ -53,7 +53,6 @@ template class BucketOutputIterator void put(typename BucketT::EntryT const& e); std::shared_ptr getBucket(BucketManager& bucketManager, - bool shouldSynchronouslyIndex, MergeKey* mergeKey = nullptr); }; } diff --git a/src/bucket/HotArchiveBucket.cpp b/src/bucket/HotArchiveBucket.cpp index b96cc98da1..6ce3ed7041 100644 --- a/src/bucket/HotArchiveBucket.cpp +++ b/src/bucket/HotArchiveBucket.cpp @@ -39,8 +39,7 @@ HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, bucketManager.incrMergeCounters(mc); } - return out.getBucket(bucketManager, - bucketManager.getConfig().isUsingBucketListDB()); + return out.getBucket(bucketManager); } std::vector diff --git a/src/bucket/LiveBucket.cpp b/src/bucket/LiveBucket.cpp index b5b5257cd8..7001baa9cc 100644 --- a/src/bucket/LiveBucket.cpp +++ b/src/bucket/LiveBucket.cpp @@ -293,16 +293,6 @@ void LiveBucket::apply(Application& app) const { ZoneScoped; - - auto filter = [&](LedgerEntryType t) { - if (app.getConfig().isUsingBucketListDB()) - { - return t == OFFER; - } - - return true; - }; - std::unordered_set emptySet; BucketApplicator applicator( app, app.getConfig().LEDGER_PROTOCOL_VERSION, @@ -310,7 +300,7 @@ LiveBucket::apply(Application& app) const 0 /*set to a level that's not the bottom so we don't 
treat live entries as init*/ , - shared_from_this(), filter, emptySet); + shared_from_this(), emptySet); BucketApplicator::Counters counters(app.getClock().now()); while (applicator) { @@ -400,8 +390,7 @@ LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, bucketManager.incrMergeCounters(mc); } - return out.getBucket(bucketManager, - bucketManager.getConfig().isUsingBucketListDB()); + return out.getBucket(bucketManager); } void diff --git a/src/bucket/readme.md b/src/bucket/readme.md index 29d4b81bd8..34439828e7 100644 --- a/src/bucket/readme.md +++ b/src/bucket/readme.md @@ -83,8 +83,6 @@ for smaller memory overhead. Because the `BucketIndex`'s must be in memory, there is a tradeoff between BucketList lookup speed and memory overhead. The following configuration flags control these options: -- `DEPRECATED_SQL_LEDGER_STATE` - - When set to false, the `BucketList` is indexed and used for ledger entry lookup - `BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT` - Page size used for `RangeIndex`, where `pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT`. 
diff --git a/src/bucket/test/BucketIndexTests.cpp b/src/bucket/test/BucketIndexTests.cpp index 42b8755b2b..708f7ed5c7 100644 --- a/src/bucket/test/BucketIndexTests.cpp +++ b/src/bucket/test/BucketIndexTests.cpp @@ -510,7 +510,6 @@ testAllIndexTypes(std::function f) SECTION("individual index only") { Config cfg(getTestConfig()); - cfg.DEPRECATED_SQL_LEDGER_STATE = false; cfg.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 0; f(cfg); } @@ -518,7 +517,6 @@ testAllIndexTypes(std::function f) SECTION("individual and range index") { Config cfg(getTestConfig()); - cfg.DEPRECATED_SQL_LEDGER_STATE = false; // First 3 levels individual, last 3 range index cfg.BUCKETLIST_DB_INDEX_CUTOFF = 1; @@ -528,7 +526,6 @@ testAllIndexTypes(std::function f) SECTION("range index only") { Config cfg(getTestConfig()); - cfg.DEPRECATED_SQL_LEDGER_STATE = false; cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0; f(cfg); } @@ -610,7 +607,6 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") // All levels use range config cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0; - cfg.DEPRECATED_SQL_LEDGER_STATE = false; cfg.BUCKETLIST_DB_PERSIST_INDEX = true; cfg.INVARIANT_CHECKS = {}; diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index 65c84b50ed..6f33d9e717 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -953,10 +953,6 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") Config cfg(getTestConfig()); cfg.USE_CONFIG_FOR_GENESIS = true; - // BucketTestApplication writes directly to BL and circumvents LedgerTxn - // interface, so we have to use BucketListDB for lookups - cfg.DEPRECATED_SQL_LEDGER_STATE = false; - auto app = createTestApplication(clock, cfg); for_versions_from(20, *app, [&] { LedgerManagerForBucketTests& lm = app->getLedgerManager(); @@ -1399,7 +1395,6 @@ TEST_CASE_VERSIONS("Searchable BucketListDB snapshots", "[bucketlist]") { VirtualClock clock; Config cfg(getTestConfig()); - cfg.DEPRECATED_SQL_LEDGER_STATE = false; 
auto app = createTestApplication(clock, cfg); LedgerManagerForBucketTests& lm = app->getLedgerManager(); diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index 4e6b53016b..745a2ad955 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -200,7 +200,6 @@ TEST_CASE_VERSIONS("bucketmanager ownership", "[bucket][bucketmanager]") if (bucketListDB) { // Enable BucketListDB with persistent indexes - cfg.DEPRECATED_SQL_LEDGER_STATE = false; cfg.NODE_IS_VALIDATOR = false; cfg.FORCE_SCP = false; } diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp index fe6a67aeb9..e511f069d7 100644 --- a/src/bucket/test/BucketTestUtils.cpp +++ b/src/bucket/test/BucketTestUtils.cpp @@ -42,17 +42,15 @@ addLiveBatchAndUpdateSnapshot(Application& app, LedgerHeader header, auto& liveBl = app.getBucketManager().getLiveBucketList(); liveBl.addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries, liveEntries, deadEntries); - if (app.getConfig().isUsingBucketListDB()) - { - auto liveSnapshot = - std::make_unique>(liveBl, header); - auto hotArchiveSnapshot = - std::make_unique>( - app.getBucketManager().getHotArchiveBucketList(), header); - app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( - std::move(liveSnapshot), std::move(hotArchiveSnapshot)); - } + auto liveSnapshot = + std::make_unique>(liveBl, header); + auto hotArchiveSnapshot = + std::make_unique>( + app.getBucketManager().getHotArchiveBucketList(), header); + + app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( + std::move(liveSnapshot), std::move(hotArchiveSnapshot)); } void @@ -65,17 +63,14 @@ addHotArchiveBatchAndUpdateSnapshot( auto& hotArchiveBl = app.getBucketManager().getHotArchiveBucketList(); hotArchiveBl.addBatch(app, header.ledgerSeq, header.ledgerVersion, archiveEntries, restoredEntries, deletedEntries); - if (app.getConfig().isUsingBucketListDB()) - 
{ - auto liveSnapshot = std::make_unique>( - app.getBucketManager().getLiveBucketList(), header); - auto hotArchiveSnapshot = - std::make_unique>(hotArchiveBl, - header); + auto liveSnapshot = std::make_unique>( + app.getBucketManager().getLiveBucketList(), header); + auto hotArchiveSnapshot = + std::make_unique>(hotArchiveBl, + header); - app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( - std::move(liveSnapshot), std::move(hotArchiveSnapshot)); - } + app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( + std::move(liveSnapshot), std::move(hotArchiveSnapshot)); } void diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp index ac564c2b04..099f10d70d 100644 --- a/src/catchup/ApplyBucketsWork.cpp +++ b/src/catchup/ApplyBucketsWork.cpp @@ -51,25 +51,15 @@ class TempLedgerVersionSetter : NonMovableOrCopyable } }; -uint32_t -ApplyBucketsWork::startingLevel() -{ - return mApp.getConfig().isUsingBucketListDB() - ? 0 - : LiveBucketList::kNumLevels - 1; -} - ApplyBucketsWork::ApplyBucketsWork( Application& app, std::map> const& buckets, - HistoryArchiveState const& applyState, uint32_t maxProtocolVersion, - std::function onlyApply) + HistoryArchiveState const& applyState, uint32_t maxProtocolVersion) : Work(app, "apply-buckets", BasicWork::RETRY_NEVER) , mBuckets(buckets) , mApplyState(applyState) - , mEntryTypeFilter(onlyApply) , mTotalSize(0) - , mLevel(startingLevel()) + , mLevel(0) , mMaxProtocolVersion(maxProtocolVersion) , mCounters(app.getClock().now()) , mIsApplyInvariantEnabled( @@ -77,15 +67,6 @@ ApplyBucketsWork::ApplyBucketsWork( { } -ApplyBucketsWork::ApplyBucketsWork( - Application& app, - std::map> const& buckets, - HistoryArchiveState const& applyState, uint32_t maxProtocolVersion) - : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion, - [](LedgerEntryType) { return true; }) -{ -} - std::shared_ptr ApplyBucketsWork::getBucket(std::string const& hash) { @@ -120,32 +101,10 @@ 
ApplyBucketsWork::doReset() if (!isAborting()) { - if (mApp.getConfig().isUsingBucketListDB()) - { - // The current size of this set is 1.6 million during BucketApply - // (as of 12/20/23). There's not a great way to estimate this, so - // reserving with some extra wiggle room - mSeenKeys.reserve(2'000'000); - } - - // When applying buckets with accounts, we have to make sure that the - // root account has been removed. This comes into play, for example, - // when applying buckets from genesis the root account already exists. - if (mEntryTypeFilter(ACCOUNT)) - { - TempLedgerVersionSetter tlvs(mApp, mMaxProtocolVersion); - { - SecretKey skey = SecretKey::fromSeed(mApp.getNetworkID()); - - LedgerTxn ltx(mApp.getLedgerTxnRoot()); - auto rootAcc = loadAccount(ltx, skey.getPublicKey()); - if (rootAcc) - { - rootAcc.erase(); - } - ltx.commit(); - } - } + // The current size of this set is 1.6 million during BucketApply + // (as of 12/20/23). There's not a great way to estimate this, so + // reserving with some extra wiggle room + mSeenKeys.reserve(2'000'000); auto addBucket = [this](std::shared_ptr const& bucket) { if (bucket->getSize() > 0) @@ -155,30 +114,16 @@ ApplyBucketsWork::doReset() } mBucketsToApply.emplace_back(bucket); }; - // If using bucketlist DB, we iterate through the live BucketList in + + // We iterate through the live BucketList in // order (i.e. L0 curr, L0 snap, L1 curr, etc) as we are just applying - // offers (and can keep track of all seen keys). Otherwise, we iterate - // in reverse order (i.e. L N snap, L N curr, L N-1 snap, etc.) as we - // are applying all entry types and cannot keep track of all seen keys - // as it would be too large. - if (mApp.getConfig().isUsingBucketListDB()) + // offers (and can keep track of all seen keys). 
+ for (auto const& hsb : mApplyState.currentBuckets) { - for (auto const& hsb : mApplyState.currentBuckets) - { - addBucket(getBucket(hsb.curr)); - addBucket(getBucket(hsb.snap)); - } - } - else - { - for (auto iter = mApplyState.currentBuckets.rbegin(); - iter != mApplyState.currentBuckets.rend(); ++iter) - { - auto const& hsb = *iter; - addBucket(getBucket(hsb.snap)); - addBucket(getBucket(hsb.curr)); - } + addBucket(getBucket(hsb.curr)); + addBucket(getBucket(hsb.snap)); } + // estimate the number of ledger entries contained in those buckets // use accounts as a rough approximator as to overestimate a bit // (default BucketEntry contains a default AccountEntry) @@ -215,7 +160,7 @@ ApplyBucketsWork::startBucket() // Create a new applicator for the bucket. mBucketApplicator = std::make_unique( mApp, mMaxProtocolVersion, mMinProtocolVersionSeen, mLevel, bucket, - mEntryTypeFilter, mSeenKeys); + mSeenKeys); } void @@ -226,54 +171,36 @@ ApplyBucketsWork::prepareForNextBucket() mApp.getCatchupManager().bucketsApplied(); mBucketToApplyIndex++; // If mBucketToApplyIndex is even, we are progressing to the next - // level, if we are using BucketListDB, this is the next greater - // level, otherwise it's the next lower level. + // level if (mBucketToApplyIndex % 2 == 0) { - mLevel = - mApp.getConfig().isUsingBucketListDB() ? mLevel + 1 : mLevel - 1; + ++mLevel; } } // We iterate through the live BucketList either in-order (level 0 curr, level 0 -// snap, level 1 curr, etc) when only applying offers, or in reverse order -// (level 9 curr, level 8 snap, level 8 curr, etc) when applying all entry -// types. When only applying offers, we keep track of the keys we have already +// snap, level 1 curr, etc). We keep track of the keys we have already // seen, and only apply an entry to the DB if it has not been seen before. This // allows us to perform a single write to the DB and ensure that only the newest // version is written. 
// -// When applying all entry types, this seen keys set would be too large. Since -// there can be no seen keys set, if we were to apply every entry in order, we -// would overwrite the newest version of an entry with an older version as we -// iterate through the BucketList. Due to this, we iterate in reverse order such -// that the newest version of a key is written last, overwriting the older -// versions. This is much slower due to DB churn. - BasicWork::State ApplyBucketsWork::doWork() { ZoneScoped; // Step 1: index buckets. Step 2: apply buckets. Step 3: assume state - bool isUsingBucketListDB = mApp.getConfig().isUsingBucketListDB(); - if (isUsingBucketListDB) + if (!mIndexBucketsWork) { - // Step 1: index buckets. - if (!mIndexBucketsWork) - { - // Spawn indexing work for the first time - mIndexBucketsWork = addWork(mBucketsToApply); - return State::WORK_RUNNING; - } - else if (mIndexBucketsWork->getState() != - BasicWork::State::WORK_SUCCESS) - { - // Exit early if indexing work is still running, or failed - return mIndexBucketsWork->getState(); - } + // Spawn indexing work for the first time + mIndexBucketsWork = addWork(mBucketsToApply); + return State::WORK_RUNNING; + } - // Otherwise, continue with next steps + else if (mIndexBucketsWork->getState() != BasicWork::State::WORK_SUCCESS) + { + // Exit early if indexing work is still running, or failed + return mIndexBucketsWork->getState(); } if (!mAssumeStateWork) @@ -293,8 +220,7 @@ ApplyBucketsWork::doWork() } } - auto isCurr = isUsingBucketListDB ? mBucketToApplyIndex % 2 == 0 - : mBucketToApplyIndex % 2 == 1; + auto isCurr = mBucketToApplyIndex % 2 == 0; if (mBucketApplicator) { TempLedgerVersionSetter tlvs(mApp, mMaxProtocolVersion); @@ -305,7 +231,9 @@ ApplyBucketsWork::doWork() return State::WORK_RUNNING; } // Application complete, check invariants and prepare for next - // bucket. + // bucket. 
Applying a bucket updates mSeenKeys with the keys applied + // by that bucket, so we need to provide a copy of the keys before + // application to the invariant check. mApp.getInvariantManager().checkOnBucketApply( mBucketsToApply.at(mBucketToApplyIndex), mApplyState.currentLedger, mLevel, isCurr, @@ -379,8 +307,7 @@ ApplyBucketsWork::getStatus() const { // This status string only applies to step 2 when we actually apply the // buckets. - bool doneIndexing = !mApp.getConfig().isUsingBucketListDB() || - (mIndexBucketsWork && mIndexBucketsWork->isDone()); + bool doneIndexing = mIndexBucketsWork && mIndexBucketsWork->isDone(); if (doneIndexing && !mSpawnedAssumeStateWork) { auto size = mTotalSize == 0 ? 0 : (100 * mAppliedSize / mTotalSize); diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h index fc239d1592..bdff18bed1 100644 --- a/src/catchup/ApplyBucketsWork.h +++ b/src/catchup/ApplyBucketsWork.h @@ -22,7 +22,6 @@ class ApplyBucketsWork : public Work { std::map> const& mBuckets; HistoryArchiveState const& mApplyState; - std::function mEntryTypeFilter; bool mSpawnedAssumeStateWork{false}; std::shared_ptr mAssumeStateWork{}; @@ -50,7 +49,6 @@ class ApplyBucketsWork : public Work void advance(std::string const& name, BucketApplicator& applicator); std::shared_ptr getBucket(std::string const& bucketHash); - uint32_t startingLevel(); bool appliedAllBuckets() const; void startBucket(); void prepareForNextBucket(); @@ -60,11 +58,6 @@ class ApplyBucketsWork : public Work Application& app, std::map> const& buckets, HistoryArchiveState const& applyState, uint32_t maxProtocolVersion); - ApplyBucketsWork( - Application& app, - std::map> const& buckets, - HistoryArchiveState const& applyState, uint32_t maxProtocolVersion, - std::function onlyApply); ~ApplyBucketsWork() = default; std::string getStatus() const override; diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp index 235d2f6385..9460d0fb03 100644 --- 
a/src/catchup/AssumeStateWork.cpp +++ b/src/catchup/AssumeStateWork.cpp @@ -64,10 +64,7 @@ AssumeStateWork::doWork() std::vector> seq; // Index Bucket files - if (mApp.getConfig().isUsingBucketListDB()) - { - seq.push_back(std::make_shared(mApp, mBuckets)); - } + seq.push_back(std::make_shared(mApp, mBuckets)); // Add bucket files to BucketList and restart merges auto assumeStateCB = [&has = mHas, diff --git a/src/catchup/CatchupWork.cpp b/src/catchup/CatchupWork.cpp index 91c2cc831c..e7434bdf0b 100644 --- a/src/catchup/CatchupWork.cpp +++ b/src/catchup/CatchupWork.cpp @@ -212,10 +212,7 @@ CatchupWork::downloadApplyBuckets() // the database. This guarantees that we clear that state the next time // the application starts. auto& ps = mApp.getPersistentState(); - for (auto let : xdr::xdr_traits::enum_values()) - { - ps.setRebuildForType(static_cast(let)); - } + ps.setRebuildForOfferTable(); std::vector> seq; auto version = mApp.getConfig().LEDGER_PROTOCOL_VERSION; @@ -243,20 +240,8 @@ CatchupWork::downloadApplyBuckets() version = mVerifiedLedgerRangeStart.header.ledgerVersion; } - std::shared_ptr applyBuckets; - if (mApp.getConfig().isUsingBucketListDB()) - { - // Only apply unsupported BucketListDB types to SQL DB when BucketList - // lookup is enabled - applyBuckets = std::make_shared( - mApp, mBuckets, *mBucketHAS, version, - BucketIndex::typeNotSupported); - } - else - { - applyBuckets = std::make_shared(mApp, mBuckets, - *mBucketHAS, version); - } + auto applyBuckets = std::make_shared( + mApp, mBuckets, *mBucketHAS, version); seq.push_back(applyBuckets); return std::make_shared(mApp, "download-verify-apply-buckets", seq, RETRY_NEVER); @@ -529,10 +514,7 @@ CatchupWork::runCatchupStep() // persistently available locally so it will return us to the // correct state. 
auto& ps = mApp.getPersistentState(); - for (auto let : xdr::xdr_traits::enum_values()) - { - ps.clearRebuildForType(static_cast(let)); - } + ps.clearRebuildForOfferTable(); } } else if (mTransactionsVerifyApplySeq) diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp index 62ea3c289c..c7887b92f4 100644 --- a/src/catchup/IndexBucketsWork.cpp +++ b/src/catchup/IndexBucketsWork.cpp @@ -61,7 +61,7 @@ IndexBucketsWork::IndexWork::postWork() auto indexFilename = bm.bucketIndexFilename(self->mBucket->getHash()); - if (bm.getConfig().isPersistingBucketListDBIndexes() && + if (bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX && fs::exists(indexFilename)) { self->mIndex = BucketIndex::load(bm, indexFilename, diff --git a/src/database/Database.cpp b/src/database/Database.cpp index 4ab34363cc..ba8fe571ee 100644 --- a/src/database/Database.cpp +++ b/src/database/Database.cpp @@ -252,13 +252,8 @@ Database::upgradeToCurrentSchema() putSchemaVersion(vers); } - // While not really a schema upgrade, we need to upgrade the DB when - // BucketListDB is enabled. 
- if (mApp.getConfig().isUsingBucketListDB()) - { - // Tx meta column no longer supported in BucketListDB - dropTxMetaIfExists(); - } + // Tx meta column no longer supported + dropTxMetaIfExists(); CLOG_INFO(Database, "DB schema is in current version"); releaseAssert(vers == SCHEMA_VERSION); diff --git a/src/invariant/BucketListIsConsistentWithDatabase.cpp b/src/invariant/BucketListIsConsistentWithDatabase.cpp index f1f3202e21..798059d9de 100644 --- a/src/invariant/BucketListIsConsistentWithDatabase.cpp +++ b/src/invariant/BucketListIsConsistentWithDatabase.cpp @@ -16,7 +16,6 @@ #include "main/Application.h" #include "main/PersistentState.h" #include "medida/timer.h" -#include "util/GlobalChecks.h" #include "util/XDRCereal.h" #include #include @@ -73,7 +72,7 @@ checkDbEntryCounts(Application& app, LedgerRange const& range, { std::string msg; auto& ltxRoot = app.getLedgerTxnRoot(); - uint64_t numInDb = ltxRoot.countObjects(OFFER, range); + uint64_t numInDb = ltxRoot.countOffers(range); if (numInDb != expectedOfferCount) { msg = fmt::format( diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp index eabec762fa..8da3e973ec 100644 --- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp +++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp @@ -188,7 +188,7 @@ struct BucketListGenerator out.put(*in); } - auto bucket = out.getBucket(bmApply, false); + auto bucket = out.getBucket(bmApply); }; writeBucketFile(level.getCurr()); writeBucketFile(level.getSnap()); @@ -329,8 +329,8 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork uint32_t maxLedger = std::numeric_limits::max() - 1; auto& ltxRoot = mApp.getLedgerTxnRoot(); - auto count = ltxRoot.countObjects( - OFFER, LedgerRange::inclusive(minLedger, maxLedger)); + auto count = ltxRoot.countOffers( + LedgerRange::inclusive(minLedger, maxLedger)); if (count > 0) { diff --git 
a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index f0f762f213..d56f4e7bed 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -347,41 +347,35 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist) releaseAssert(latestLedgerHeader.has_value()); - // Step 3. Restore BucketList if we're doing a full core startup - // (startServices=true), OR when using BucketListDB - if (restoreBucketlist || mApp.getConfig().isUsingBucketListDB()) + HistoryArchiveState has = getLastClosedLedgerHAS(); + auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has); + auto pubmissing = + mApp.getHistoryManager().getMissingBucketsReferencedByPublishQueue(); + missing.insert(missing.end(), pubmissing.begin(), pubmissing.end()); + if (!missing.empty()) { - HistoryArchiveState has = getLastClosedLedgerHAS(); - auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has); - auto pubmissing = mApp.getHistoryManager() - .getMissingBucketsReferencedByPublishQueue(); - missing.insert(missing.end(), pubmissing.begin(), pubmissing.end()); - if (!missing.empty()) + CLOG_ERROR(Ledger, "{} buckets are missing from bucket directory '{}'", + missing.size(), mApp.getBucketManager().getBucketDir()); + throw std::runtime_error("Bucket directory is corrupt"); + } + + if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) + { + // Only restart merges in full startup mode. Many modes in core + // (standalone offline commands, in-memory setup) do not need to + // spin up expensive merge processes. 
+ auto assumeStateWork = + mApp.getWorkScheduler().executeWork( + has, latestLedgerHeader->ledgerVersion, restoreBucketlist); + if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS) { - CLOG_ERROR(Ledger, - "{} buckets are missing from bucket directory '{}'", - missing.size(), mApp.getBucketManager().getBucketDir()); - throw std::runtime_error("Bucket directory is corrupt"); + CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}", + ledgerAbbrev(*latestLedgerHeader)); } - - if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) + else { - // Only restart merges in full startup mode. Many modes in core - // (standalone offline commands, in-memory setup) do not need to - // spin up expensive merge processes. - auto assumeStateWork = - mApp.getWorkScheduler().executeWork( - has, latestLedgerHeader->ledgerVersion, restoreBucketlist); - if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS) - { - CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}", - ledgerAbbrev(*latestLedgerHeader)); - } - else - { - // Work should only fail during graceful shutdown - releaseAssertOrThrow(mApp.isStopping()); - } + // Work should only fail during graceful shutdown + releaseAssertOrThrow(mApp.isStopping()); } } @@ -1043,8 +1037,7 @@ LedgerManagerImpl::closeLedger(LedgerCloseData const& ledgerData) hm.logAndUpdatePublishStatus(); // step 5 - if (protocolVersionStartsFrom(initialLedgerVers, - SOROBAN_PROTOCOL_VERSION)) + if (protocolVersionStartsFrom(initialLedgerVers, SOROBAN_PROTOCOL_VERSION)) { mApp.getBucketManager().startBackgroundEvictionScan(ledgerSeq + 1); } @@ -1277,8 +1270,7 @@ LedgerManagerImpl::advanceLedgerPointers(LedgerHeader const& header, mLastClosedLedger.hash = ledgerHash; mLastClosedLedger.header = header; - if (mApp.getConfig().isUsingBucketListDB() && - header.ledgerSeq != prevLedgerSeq) + if (header.ledgerSeq != prevLedgerSeq) { auto& bm = mApp.getBucketManager(); auto liveSnapshot = std::make_unique>( @@ -1481,10 +1473,7 @@ 
LedgerManagerImpl::prefetchTransactionData( { if (tx->isSoroban()) { - if (mApp.getConfig().isUsingBucketListDB()) - { - tx->insertKeysForTxApply(sorobanKeys, lkMeter.get()); - } + tx->insertKeysForTxApply(sorobanKeys, lkMeter.get()); } else { @@ -1493,14 +1482,11 @@ LedgerManagerImpl::prefetchTransactionData( } // Prefetch classic and soroban keys separately for greater visibility // into the performance of each mode. - if (mApp.getConfig().isUsingBucketListDB()) + if (!sorobanKeys.empty()) { - if (!sorobanKeys.empty()) - { - mApp.getLedgerTxnRoot().prefetchSoroban(sorobanKeys, - lkMeter.get()); - } + mApp.getLedgerTxnRoot().prefetchSoroban(sorobanKeys, lkMeter.get()); } + mApp.getLedgerTxnRoot().prefetchClassic(classicKeys); } } diff --git a/src/ledger/LedgerStateSnapshot.cpp b/src/ledger/LedgerStateSnapshot.cpp index e04101a0fc..ba51424b5b 100644 --- a/src/ledger/LedgerStateSnapshot.cpp +++ b/src/ledger/LedgerStateSnapshot.cpp @@ -222,11 +222,8 @@ LedgerSnapshot::LedgerSnapshot(AbstractLedgerTxn& ltx) LedgerSnapshot::LedgerSnapshot(Application& app) { - if (app.getConfig().DEPRECATED_SQL_LEDGER_STATE #ifdef BUILD_TESTS - || app.getConfig().MODE_USES_IN_MEMORY_LEDGER -#endif - ) + if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER) { // Legacy read-only SQL transaction mLegacyLedgerTxn = std::make_unique( @@ -235,9 +232,8 @@ LedgerSnapshot::LedgerSnapshot(Application& app) mGetter = std::make_unique(*mLegacyLedgerTxn); } else - { +#endif mGetter = std::make_unique(app.getBucketManager()); - } } LedgerHeaderWrapper diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp index 322c34c10c..cf322d447d 100644 --- a/src/ledger/LedgerTxn.cpp +++ b/src/ledger/LedgerTxn.cpp @@ -2002,22 +2002,16 @@ LedgerTxn::Impl::unsealHeader(LedgerTxn& self, } uint64_t -LedgerTxn::countObjects(LedgerEntryType let) const +LedgerTxn::countOffers(LedgerRange const& ledgers) const { - throw std::runtime_error("called countObjects on non-root LedgerTxn"); -} - -uint64_t 
-LedgerTxn::countObjects(LedgerEntryType let, LedgerRange const& ledgers) const -{ - throw std::runtime_error("called countObjects on non-root LedgerTxn"); + throw std::runtime_error("called countOffers on non-root LedgerTxn"); } void -LedgerTxn::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const +LedgerTxn::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const { throw std::runtime_error( - "called deleteObjectsModifiedOnOrAfterLedger on non-root LedgerTxn"); + "called deleteOffersModifiedOnOrAfterLedger on non-root LedgerTxn"); } void @@ -2610,8 +2604,7 @@ accum(EntryIterator const& iter, std::vector& upsertBuffer, // Return true only if something is actually accumulated and not skipped over bool -BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter, - bool bucketListDBEnabled) +BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter) { // Right now, only LEDGER_ENTRY are recorded in the SQL database if (iter.key().type() != InternalLedgerEntryType::LEDGER_ENTRY) @@ -2619,55 +2612,15 @@ BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter, return false; } - // Don't accumulate entry types that are supported by BucketListDB when it - // is enabled + // Don't accumulate entry types that are supported by BucketListDB auto type = iter.key().ledgerKey().type(); - if (bucketListDBEnabled && !BucketIndex::typeNotSupported(type)) + if (!BucketIndex::typeNotSupported(type)) { return false; } - switch (type) - { - case ACCOUNT: - accum(iter, mAccountsToUpsert, mAccountsToDelete); - break; - case TRUSTLINE: - accum(iter, mTrustLinesToUpsert, mTrustLinesToDelete); - break; - case OFFER: - accum(iter, mOffersToUpsert, mOffersToDelete); - break; - case DATA: - accum(iter, mAccountDataToUpsert, mAccountDataToDelete); - break; - case CLAIMABLE_BALANCE: - accum(iter, mClaimableBalanceToUpsert, mClaimableBalanceToDelete); - break; - case LIQUIDITY_POOL: - accum(iter, mLiquidityPoolToUpsert, mLiquidityPoolToDelete); - 
break; - case CONTRACT_DATA: - accum(iter, mContractDataToUpsert, mContractDataToDelete); - break; - case CONTRACT_CODE: - accum(iter, mContractCodeToUpsert, mContractCodeToDelete); - break; - case CONFIG_SETTING: - { - // Configuration can not be deleted. - releaseAssert(iter.entryExists()); - std::vector emptyEntries; - accum(iter, mConfigSettingsToUpsert, emptyEntries); - break; - } - case TTL: - accum(iter, mTTLToUpsert, mTTLToDelete); - break; - default: - abort(); - } - + releaseAssertOrThrow(type == OFFER); + accum(iter, mOffersToUpsert, mOffersToDelete); return true; } @@ -2676,30 +2629,7 @@ LedgerTxnRoot::Impl::bulkApply(BulkLedgerEntryChangeAccumulator& bleca, size_t bufferThreshold, LedgerTxnConsistency cons) { - auto& upsertAccounts = bleca.getAccountsToUpsert(); - if (upsertAccounts.size() > bufferThreshold) - { - bulkUpsertAccounts(upsertAccounts); - upsertAccounts.clear(); - } - auto& deleteAccounts = bleca.getAccountsToDelete(); - if (deleteAccounts.size() > bufferThreshold) - { - bulkDeleteAccounts(deleteAccounts, cons); - deleteAccounts.clear(); - } - auto& upsertTrustLines = bleca.getTrustLinesToUpsert(); - if (upsertTrustLines.size() > bufferThreshold) - { - bulkUpsertTrustLines(upsertTrustLines); - upsertTrustLines.clear(); - } - auto& deleteTrustLines = bleca.getTrustLinesToDelete(); - if (deleteTrustLines.size() > bufferThreshold) - { - bulkDeleteTrustLines(deleteTrustLines, cons); - deleteTrustLines.clear(); - } + auto& upsertOffers = bleca.getOffersToUpsert(); if (upsertOffers.size() > bufferThreshold) { @@ -2712,87 +2642,6 @@ LedgerTxnRoot::Impl::bulkApply(BulkLedgerEntryChangeAccumulator& bleca, bulkDeleteOffers(deleteOffers, cons); deleteOffers.clear(); } - auto& upsertAccountData = bleca.getAccountDataToUpsert(); - if (upsertAccountData.size() > bufferThreshold) - { - bulkUpsertAccountData(upsertAccountData); - upsertAccountData.clear(); - } - auto& deleteAccountData = bleca.getAccountDataToDelete(); - if (deleteAccountData.size() > 
bufferThreshold) - { - bulkDeleteAccountData(deleteAccountData, cons); - deleteAccountData.clear(); - } - auto& upsertClaimableBalance = bleca.getClaimableBalanceToUpsert(); - if (upsertClaimableBalance.size() > bufferThreshold) - { - bulkUpsertClaimableBalance(upsertClaimableBalance); - upsertClaimableBalance.clear(); - } - auto& deleteClaimableBalance = bleca.getClaimableBalanceToDelete(); - if (deleteClaimableBalance.size() > bufferThreshold) - { - bulkDeleteClaimableBalance(deleteClaimableBalance, cons); - deleteClaimableBalance.clear(); - } - auto& upsertLiquidityPool = bleca.getLiquidityPoolToUpsert(); - if (upsertLiquidityPool.size() > bufferThreshold) - { - bulkUpsertLiquidityPool(upsertLiquidityPool); - upsertLiquidityPool.clear(); - } - auto& deleteLiquidityPool = bleca.getLiquidityPoolToDelete(); - if (deleteLiquidityPool.size() > bufferThreshold) - { - bulkDeleteLiquidityPool(deleteLiquidityPool, cons); - deleteLiquidityPool.clear(); - } - auto& upsertConfigSettings = bleca.getConfigSettingsToUpsert(); - if (upsertConfigSettings.size() > bufferThreshold) - { - bulkUpsertConfigSettings(upsertConfigSettings); - upsertConfigSettings.clear(); - } - auto& upsertContractData = bleca.getContractDataToUpsert(); - if (upsertContractData.size() > bufferThreshold) - { - bulkUpsertContractData(upsertContractData); - upsertContractData.clear(); - } - auto& deleteContractData = bleca.getContractDataToDelete(); - if (deleteContractData.size() > bufferThreshold) - { - bulkDeleteContractData(deleteContractData, cons); - deleteContractData.clear(); - } - - auto& upsertContractCode = bleca.getContractCodeToUpsert(); - if (upsertContractCode.size() > bufferThreshold) - { - bulkUpsertContractCode(upsertContractCode); - upsertContractCode.clear(); - } - auto& deleteContractCode = bleca.getContractCodeToDelete(); - if (deleteContractCode.size() > bufferThreshold) - { - bulkDeleteContractCode(deleteContractCode, cons); - deleteContractCode.clear(); - } - - auto& upsertTTL = 
bleca.getTTLToUpsert(); - if (upsertTTL.size() > bufferThreshold) - { - bulkUpsertTTL(upsertTTL); - upsertTTL.clear(); - } - - auto& deleteTTL = bleca.getTTLToDelete(); - if (deleteTTL.size() > bufferThreshold) - { - bulkDeleteTTL(deleteTTL, cons); - deleteTTL.clear(); - } } void @@ -2814,14 +2663,13 @@ LedgerTxnRoot::Impl::commitChild(EntryIterator iter, // guarantee, so use std::unique_ptr<...>::swap to achieve it auto childHeader = std::make_unique(mChild->getHeader()); - auto bucketListDBEnabled = mApp.getConfig().isUsingBucketListDB(); auto bleca = BulkLedgerEntryChangeAccumulator(); [[maybe_unused]] int64_t counter{0}; try { while ((bool)iter) { - if (bleca.accumulate(iter, bucketListDBEnabled)) + if (bleca.accumulate(iter)) { ++counter; } @@ -2900,40 +2748,18 @@ LedgerTxnRoot::Impl::tableFromLedgerEntryType(LedgerEntryType let) } uint64_t -LedgerTxnRoot::countObjects(LedgerEntryType let) const -{ - return mImpl->countObjects(let); -} - -uint64_t -LedgerTxnRoot::Impl::countObjects(LedgerEntryType let) const -{ - using namespace soci; - throwIfChild(); - - std::string query = - "SELECT COUNT(*) FROM " + tableFromLedgerEntryType(let) + ";"; - uint64_t count = 0; - mApp.getDatabase().getSession() << query, into(count); - return count; -} - -uint64_t -LedgerTxnRoot::countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const +LedgerTxnRoot::countOffers(LedgerRange const& ledgers) const { - return mImpl->countObjects(let, ledgers); + return mImpl->countOffers(ledgers); } uint64_t -LedgerTxnRoot::Impl::countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const +LedgerTxnRoot::Impl::countOffers(LedgerRange const& ledgers) const { using namespace soci; throwIfChild(); - std::string query = "SELECT COUNT(*) FROM " + - tableFromLedgerEntryType(let) + + std::string query = "SELECT COUNT(*) FROM offers" " WHERE lastmodified >= :v1 AND lastmodified < :v2;"; uint64_t count = 0; int first = static_cast(ledgers.mFirst); @@ -2944,26 +2770,22 @@ 
LedgerTxnRoot::Impl::countObjects(LedgerEntryType let, } void -LedgerTxnRoot::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const +LedgerTxnRoot::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const { - return mImpl->deleteObjectsModifiedOnOrAfterLedger(ledger); + return mImpl->deleteOffersModifiedOnOrAfterLedger(ledger); } void -LedgerTxnRoot::Impl::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const +LedgerTxnRoot::Impl::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const { using namespace soci; throwIfChild(); mEntryCache.clear(); mBestOffers.clear(); - for (auto let : xdr::xdr_traits::enum_values()) - { - LedgerEntryType t = static_cast(let); - std::string query = "DELETE FROM " + tableFromLedgerEntryType(t) + - " WHERE lastmodified >= :v1"; - mApp.getDatabase().getSession() << query, use(ledger); - } + std::string query = "DELETE FROM " + tableFromLedgerEntryType(OFFER) + + " WHERE lastmodified >= :v1"; + mApp.getDatabase().getSession() << query, use(ledger); } void @@ -3096,128 +2918,14 @@ LedgerTxnRoot::Impl::prefetchInternal(UnorderedSet const& keys, } }; - if (mApp.getConfig().isUsingBucketListDB()) - { - LedgerKeySet keysToSearch; - for (auto const& key : keys) - { - insertIfNotLoaded(keysToSearch, key); - } - auto blLoad = getSearchableLiveBucketListSnapshot().loadKeysWithLimits( - keysToSearch, lkMeter); - cacheResult(populateLoadedEntries(keysToSearch, blLoad, lkMeter)); - } - else + LedgerKeySet keysToSearch; + for (auto const& key : keys) { - UnorderedSet accounts; - UnorderedSet offers; - UnorderedSet trustlines; - UnorderedSet data; - UnorderedSet claimablebalance; - UnorderedSet liquiditypool; - UnorderedSet contractdata; - UnorderedSet configSettings; - UnorderedSet contractCode; - UnorderedSet ttl; - - for (auto const& key : keys) - { - switch (key.type()) - { - case ACCOUNT: - insertIfNotLoaded(accounts, key); - if (accounts.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadAccounts(accounts)); - 
accounts.clear(); - } - break; - case OFFER: - insertIfNotLoaded(offers, key); - if (offers.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadOffers(offers)); - offers.clear(); - } - break; - case TRUSTLINE: - insertIfNotLoaded(trustlines, key); - if (trustlines.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadTrustLines(trustlines)); - trustlines.clear(); - } - break; - case DATA: - insertIfNotLoaded(data, key); - if (data.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadData(data)); - data.clear(); - } - break; - case CLAIMABLE_BALANCE: - insertIfNotLoaded(claimablebalance, key); - if (claimablebalance.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadClaimableBalance(claimablebalance)); - claimablebalance.clear(); - } - break; - case LIQUIDITY_POOL: - insertIfNotLoaded(liquiditypool, key); - if (liquiditypool.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadLiquidityPool(liquiditypool)); - liquiditypool.clear(); - } - break; - case CONTRACT_DATA: - insertIfNotLoaded(contractdata, key); - if (contractdata.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadContractData(contractdata)); - contractdata.clear(); - } - break; - case CONTRACT_CODE: - insertIfNotLoaded(contractCode, key); - if (contractCode.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadContractCode(contractCode)); - contractCode.clear(); - } - break; - case CONFIG_SETTING: - insertIfNotLoaded(configSettings, key); - if (configSettings.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadConfigSettings(configSettings)); - configSettings.clear(); - } - break; - case TTL: - insertIfNotLoaded(ttl, key); - if (ttl.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadTTL(ttl)); - ttl.clear(); - } - } - } - - // Prefetch whatever is remaining - cacheResult(bulkLoadAccounts(accounts)); - cacheResult(bulkLoadOffers(offers)); - cacheResult(bulkLoadTrustLines(trustlines)); - cacheResult(bulkLoadData(data)); - 
cacheResult(bulkLoadClaimableBalance(claimablebalance)); - cacheResult(bulkLoadLiquidityPool(liquiditypool)); - cacheResult(bulkLoadConfigSettings(configSettings)); - cacheResult(bulkLoadContractData(contractdata)); - cacheResult(bulkLoadContractCode(contractCode)); - cacheResult(bulkLoadTTL(ttl)); + insertIfNotLoaded(keysToSearch, key); } + auto blLoad = getSearchableLiveBucketListSnapshot().loadKeysWithLimits( + keysToSearch, lkMeter); + cacheResult(populateLoadedEntries(keysToSearch, blLoad, lkMeter)); return total; } @@ -3489,7 +3197,6 @@ LedgerTxnRoot::Impl::areEntriesMissingInCacheForOffer(OfferEntry const& oe) SearchableLiveBucketListSnapshot& LedgerTxnRoot::Impl::getSearchableLiveBucketListSnapshot() const { - releaseAssert(mApp.getConfig().isUsingBucketListDB()); if (!mSearchableBucketListSnapshot) { mSearchableBucketListSnapshot = @@ -3633,17 +3340,9 @@ LedgerTxnRoot::Impl::getPoolShareTrustLinesByAccountAndAsset( std::vector trustLines; try { - if (mApp.getConfig().isUsingBucketListDB()) - { - trustLines = - getSearchableLiveBucketListSnapshot() - .loadPoolShareTrustLinesByAccountAndAsset(account, asset); - } - else - { - trustLines = - loadPoolShareTrustLinesByAccountAndAsset(account, asset); - } + trustLines = + getSearchableLiveBucketListSnapshot() + .loadPoolShareTrustLinesByAccountAndAsset(account, asset); } catch (NonSociRelatedException&) { @@ -3697,15 +3396,8 @@ LedgerTxnRoot::Impl::getInflationWinners(size_t maxWinners, int64_t minVotes) { try { - if (mApp.getConfig().isUsingBucketListDB()) - { - return getSearchableLiveBucketListSnapshot().loadInflationWinners( - maxWinners, minVotes); - } - else - { - return loadInflationWinners(maxWinners, minVotes); - } + return getSearchableLiveBucketListSnapshot().loadInflationWinners( + maxWinners, minVotes); } catch (std::exception& e) { @@ -3753,47 +3445,13 @@ LedgerTxnRoot::Impl::getNewestVersion(InternalLedgerKey const& gkey) const std::shared_ptr entry; try { - if 
(mApp.getConfig().isUsingBucketListDB() && key.type() != OFFER) + if (key.type() != OFFER) { entry = getSearchableLiveBucketListSnapshot().load(key); } else { - switch (key.type()) - { - case ACCOUNT: - entry = loadAccount(key); - break; - case DATA: - entry = loadData(key); - break; - case OFFER: - entry = loadOffer(key); - break; - case TRUSTLINE: - entry = loadTrustLine(key); - break; - case CLAIMABLE_BALANCE: - entry = loadClaimableBalance(key); - break; - case LIQUIDITY_POOL: - entry = loadLiquidityPool(key); - break; - case CONTRACT_DATA: - entry = loadContractData(key); - break; - case CONTRACT_CODE: - entry = loadContractCode(key); - break; - case CONFIG_SETTING: - entry = loadConfigSetting(key); - break; - case TTL: - entry = loadTTL(key); - break; - default: - throw std::runtime_error("Unknown key type"); - } + entry = loadOffer(key); } } catch (NonSociRelatedException&) diff --git a/src/ledger/LedgerTxn.h b/src/ledger/LedgerTxn.h index b839ddafc5..1c1cd4385b 100644 --- a/src/ledger/LedgerTxn.h +++ b/src/ledger/LedgerTxn.h @@ -465,20 +465,14 @@ class AbstractLedgerTxnParent virtual std::shared_ptr getNewestVersion(InternalLedgerKey const& key) const = 0; - // Return the count of the number of ledger objects of type `let`. Will - // throw when called on anything other than a (real or stub) root LedgerTxn. - virtual uint64_t countObjects(LedgerEntryType let) const = 0; - - // Return the count of the number of ledger objects of type `let` within + // Return the count of the number of offer objects within // range of ledgers `ledgers`. Will throw when called on anything other than // a (real or stub) root LedgerTxn. - virtual uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const = 0; + virtual uint64_t countOffers(LedgerRange const& ledgers) const = 0; // Delete all ledger entries modified on-or-after `ledger`. Will throw // when called on anything other than a (real or stub) root LedgerTxn. 
- virtual void - deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const = 0; + virtual void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const = 0; // Delete all account ledger entries in the database. Will throw when called // on anything other than a (real or stub) root LedgerTxn. @@ -817,10 +811,8 @@ class LedgerTxn : public AbstractLedgerTxn void unsealHeader(std::function f) override; - uint64_t countObjects(LedgerEntryType let) const override; - uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const override; - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; + uint64_t countOffers(LedgerRange const& ledgers) const override; + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; void dropAccounts(bool rebuild) override; void dropData(bool rebuild) override; void dropOffers(bool rebuild) override; @@ -881,11 +873,9 @@ class LedgerTxnRoot : public AbstractLedgerTxnParent void commitChild(EntryIterator iter, LedgerTxnConsistency cons) noexcept override; - uint64_t countObjects(LedgerEntryType let) const override; - uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const override; + uint64_t countOffers(LedgerRange const& ledgers) const override; - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; void dropAccounts(bool rebuild) override; void dropData(bool rebuild) override; diff --git a/src/ledger/LedgerTxnImpl.h b/src/ledger/LedgerTxnImpl.h index b536cb3777..82241b74ff 100644 --- a/src/ledger/LedgerTxnImpl.h +++ b/src/ledger/LedgerTxnImpl.h @@ -53,52 +53,10 @@ class EntryIterator::AbstractImpl // reorganizing the relevant parts of soci. 
class BulkLedgerEntryChangeAccumulator { - - std::vector mAccountsToUpsert; - std::vector mAccountsToDelete; - std::vector mAccountDataToUpsert; - std::vector mAccountDataToDelete; - std::vector mClaimableBalanceToUpsert; - std::vector mClaimableBalanceToDelete; std::vector mOffersToUpsert; std::vector mOffersToDelete; - std::vector mTrustLinesToUpsert; - std::vector mTrustLinesToDelete; - std::vector mLiquidityPoolToUpsert; - std::vector mLiquidityPoolToDelete; - std::vector mContractDataToUpsert; - std::vector mContractDataToDelete; - std::vector mContractCodeToUpsert; - std::vector mContractCodeToDelete; - std::vector mConfigSettingsToUpsert; - std::vector mTTLToUpsert; - std::vector mTTLToDelete; public: - std::vector& - getAccountsToUpsert() - { - return mAccountsToUpsert; - } - - std::vector& - getAccountsToDelete() - { - return mAccountsToDelete; - } - - std::vector& - getTrustLinesToUpsert() - { - return mTrustLinesToUpsert; - } - - std::vector& - getTrustLinesToDelete() - { - return mTrustLinesToDelete; - } - std::vector& getOffersToUpsert() { @@ -111,85 +69,7 @@ class BulkLedgerEntryChangeAccumulator return mOffersToDelete; } - std::vector& - getAccountDataToUpsert() - { - return mAccountDataToUpsert; - } - - std::vector& - getAccountDataToDelete() - { - return mAccountDataToDelete; - } - - std::vector& - getClaimableBalanceToUpsert() - { - return mClaimableBalanceToUpsert; - } - - std::vector& - getClaimableBalanceToDelete() - { - return mClaimableBalanceToDelete; - } - - std::vector& - getLiquidityPoolToUpsert() - { - return mLiquidityPoolToUpsert; - } - - std::vector& - getLiquidityPoolToDelete() - { - return mLiquidityPoolToDelete; - } - - std::vector& - getConfigSettingsToUpsert() - { - return mConfigSettingsToUpsert; - } - - std::vector& - getContractDataToUpsert() - { - return mContractDataToUpsert; - } - - std::vector& - getContractDataToDelete() - { - return mContractDataToDelete; - } - - std::vector& - getContractCodeToUpsert() - { - return 
mContractCodeToUpsert; - } - - std::vector& - getContractCodeToDelete() - { - return mContractCodeToDelete; - } - - std::vector& - getTTLToUpsert() - { - return mTTLToUpsert; - } - - std::vector& - getTTLToDelete() - { - return mTTLToDelete; - } - - bool accumulate(EntryIterator const& iter, bool bucketListDBEnabled); + bool accumulate(EntryIterator const& iter); }; // Many functions in LedgerTxn::Impl provide a basic exception safety @@ -892,13 +772,11 @@ class LedgerTxnRoot::Impl void commitChild(EntryIterator iter, LedgerTxnConsistency cons) noexcept; - // countObjects has the strong exception safety guarantee. - uint64_t countObjects(LedgerEntryType let) const; - uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const; + // countOffers has the strong exception safety guarantee. + uint64_t countOffers(LedgerRange const& ledgers) const; - // deleteObjectsModifiedOnOrAfterLedger has no exception safety guarantees. - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const; + // deleteOffersModifiedOnOrAfterLedger has no exception safety guarantees. + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const; // dropAccounts, dropData, dropOffers, and dropTrustLines have no exception // safety guarantees. 
diff --git a/src/ledger/NetworkConfig.cpp b/src/ledger/NetworkConfig.cpp index f22144668e..6219d83384 100644 --- a/src/ledger/NetworkConfig.cpp +++ b/src/ledger/NetworkConfig.cpp @@ -1867,7 +1867,7 @@ SorobanNetworkConfig::writeAllSettings(AbstractLedgerTxn& ltx, // If testing with BucketListDB, we need to commit directly to the // BucketList - if (app.getConfig().isUsingBucketListDB()) + if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER) { auto lcl = app.getLedgerManager().getLastClosedLedgerHeader(); lcl.header.ledgerSeq += 1; diff --git a/src/ledger/test/InMemoryLedgerTxn.cpp b/src/ledger/test/InMemoryLedgerTxn.cpp index 93a18733d7..e213ace9ba 100644 --- a/src/ledger/test/InMemoryLedgerTxn.cpp +++ b/src/ledger/test/InMemoryLedgerTxn.cpp @@ -366,22 +366,15 @@ InMemoryLedgerTxn::dropOffers(bool rebuild) } uint64_t -InMemoryLedgerTxn::countObjects(LedgerEntryType let) const +InMemoryLedgerTxn::countOffers(LedgerRange const& ledgers) const { - return mRealRootForOffers.countObjects(let); -} - -uint64_t -InMemoryLedgerTxn::countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const -{ - return mRealRootForOffers.countObjects(let, ledgers); + return mRealRootForOffers.countOffers(ledgers); } void -InMemoryLedgerTxn::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const +InMemoryLedgerTxn::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const { - mRealRootForOffers.deleteObjectsModifiedOnOrAfterLedger(ledger); + mRealRootForOffers.deleteOffersModifiedOnOrAfterLedger(ledger); } UnorderedMap diff --git a/src/ledger/test/InMemoryLedgerTxn.h b/src/ledger/test/InMemoryLedgerTxn.h index f7c754284f..6a14d217fa 100644 --- a/src/ledger/test/InMemoryLedgerTxn.h +++ b/src/ledger/test/InMemoryLedgerTxn.h @@ -134,12 +134,8 @@ class InMemoryLedgerTxn : public LedgerTxn OfferDescriptor const& worseThan) override; void dropOffers(bool rebuild) override; - - uint64_t countObjects(LedgerEntryType let) const override; - uint64_t countObjects(LedgerEntryType let, - 
LedgerRange const& ledgers) const override; - - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; + uint64_t countOffers(LedgerRange const& ledgers) const override; + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; #ifdef BEST_OFFER_DEBUGGING virtual bool bestOfferDebuggingEnabled() const override; @@ -150,5 +146,4 @@ class InMemoryLedgerTxn : public LedgerTxn std::unordered_set& exclude) override; #endif }; - } diff --git a/src/ledger/test/InMemoryLedgerTxnRoot.cpp b/src/ledger/test/InMemoryLedgerTxnRoot.cpp index 7da4f37e1c..3d37ba9ae7 100644 --- a/src/ledger/test/InMemoryLedgerTxnRoot.cpp +++ b/src/ledger/test/InMemoryLedgerTxnRoot.cpp @@ -97,20 +97,13 @@ InMemoryLedgerTxnRoot::getNewestVersion(InternalLedgerKey const& key) const } uint64_t -InMemoryLedgerTxnRoot::countObjects(LedgerEntryType let) const -{ - return 0; -} - -uint64_t -InMemoryLedgerTxnRoot::countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const +InMemoryLedgerTxnRoot::countOffers(LedgerRange const& ledgers) const { return 0; } void -InMemoryLedgerTxnRoot::deleteObjectsModifiedOnOrAfterLedger( +InMemoryLedgerTxnRoot::deleteOffersModifiedOnOrAfterLedger( uint32_t ledger) const { } diff --git a/src/ledger/test/InMemoryLedgerTxnRoot.h b/src/ledger/test/InMemoryLedgerTxnRoot.h index 5d4bc3fe19..1fb4fd4617 100644 --- a/src/ledger/test/InMemoryLedgerTxnRoot.h +++ b/src/ledger/test/InMemoryLedgerTxnRoot.h @@ -64,11 +64,9 @@ class InMemoryLedgerTxnRoot : public AbstractLedgerTxnParent std::shared_ptr getNewestVersion(InternalLedgerKey const& key) const override; - uint64_t countObjects(LedgerEntryType let) const override; - uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const override; + uint64_t countOffers(LedgerRange const& ledgers) const override; - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; void 
dropAccounts(bool rebuild) override; void dropData(bool rebuild) override; diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp index 9c6c636406..409c819030 100644 --- a/src/main/ApplicationImpl.cpp +++ b/src/main/ApplicationImpl.cpp @@ -179,122 +179,34 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg) static void maybeRebuildLedger(Application& app, bool applyBuckets) { - std::set toDrop; - std::set toRebuild; auto& ps = app.getPersistentState(); - auto bucketListDBEnabled = app.getConfig().isUsingBucketListDB(); - -#ifdef BUILD_TESTS - if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER) - { - // in-memory mode must always rebuild SQL table - toRebuild.emplace(OFFER); - } - else -#endif + if (ps.shouldRebuildForOfferTable()) { - for (auto let : xdr::xdr_traits::enum_values()) - { - // If BucketListDB is enabled, drop all tables except for offers - LedgerEntryType t = static_cast(let); - if (let != OFFER && bucketListDBEnabled) - { - toDrop.emplace(t); - continue; - } - - if (ps.shouldRebuildForType(t)) - { - toRebuild.emplace(t); - } - } - } + app.getDatabase().clearPreparedStatementCache(); + soci::transaction tx(app.getDatabase().getSession()); + LOG_INFO(DEFAULT_LOG, "Dropping offers"); + app.getLedgerTxnRoot().dropOffers(/*rebuild=*/true); - app.getDatabase().clearPreparedStatementCache(); - soci::transaction tx(app.getDatabase().getSession()); + tx.commit(); - auto loopEntries = [&](auto const& entryTypeSet, bool shouldRebuild) { - for (auto let : entryTypeSet) + // No transaction is needed. ApplyBucketsWork breaks the apply into many + // small chunks, each of which has its own transaction. If it fails at + // some point in the middle, then rebuildledger will not be cleared so + // this will run again on next start up. 
+ if (applyBuckets) { - switch (let) + LOG_INFO(DEFAULT_LOG, + "Rebuilding ledger tables by applying buckets"); + if (!applyBucketsForLCL(app)) { - case ACCOUNT: - LOG_INFO(DEFAULT_LOG, "Dropping accounts"); - app.getLedgerTxnRoot().dropAccounts(shouldRebuild); - break; - case TRUSTLINE: - LOG_INFO(DEFAULT_LOG, "Dropping trustlines"); - app.getLedgerTxnRoot().dropTrustLines(shouldRebuild); - break; - case OFFER: - LOG_INFO(DEFAULT_LOG, "Dropping offers"); - app.getLedgerTxnRoot().dropOffers(shouldRebuild); - break; - case DATA: - LOG_INFO(DEFAULT_LOG, "Dropping accountdata"); - app.getLedgerTxnRoot().dropData(shouldRebuild); - break; - case CLAIMABLE_BALANCE: - LOG_INFO(DEFAULT_LOG, "Dropping claimablebalances"); - app.getLedgerTxnRoot().dropClaimableBalances(shouldRebuild); - break; - case LIQUIDITY_POOL: - LOG_INFO(DEFAULT_LOG, "Dropping liquiditypools"); - app.getLedgerTxnRoot().dropLiquidityPools(shouldRebuild); - break; - case CONTRACT_DATA: - LOG_INFO(DEFAULT_LOG, "Dropping contractdata"); - app.getLedgerTxnRoot().dropContractData(shouldRebuild); - break; - case CONTRACT_CODE: - LOG_INFO(DEFAULT_LOG, "Dropping contractcode"); - app.getLedgerTxnRoot().dropContractCode(shouldRebuild); - break; - case CONFIG_SETTING: - LOG_INFO(DEFAULT_LOG, "Dropping configsettings"); - app.getLedgerTxnRoot().dropConfigSettings(shouldRebuild); - break; - case TTL: - LOG_INFO(DEFAULT_LOG, "Dropping ttl"); - app.getLedgerTxnRoot().dropTTL(shouldRebuild); - break; - default: - abort(); + throw std::runtime_error("Could not rebuild ledger tables"); } - } - }; - - loopEntries(toRebuild, true); - loopEntries(toDrop, false); - tx.commit(); - - // Nothing to apply, exit early - if (toRebuild.empty()) - { - return; - } - - // No transaction is needed. ApplyBucketsWork breaks the apply into many - // small chunks, each of which has its own transaction. If it fails at - // some point in the middle, then rebuildledger will not be cleared so - // this will run again on next start up. 
- if (applyBuckets) - { - LOG_INFO(DEFAULT_LOG, "Rebuilding ledger tables by applying buckets"); - auto filter = [&toRebuild](LedgerEntryType t) { - return toRebuild.find(t) != toRebuild.end(); - }; - if (!applyBucketsForLCL(app, filter)) - { - throw std::runtime_error("Could not rebuild ledger tables"); + LOG_INFO(DEFAULT_LOG, "Successfully rebuilt ledger tables"); } LOG_INFO(DEFAULT_LOG, "Successfully rebuilt ledger tables"); } - for (auto let : toRebuild) - { - ps.clearRebuildForType(let); - } + ps.clearRebuildForOfferTable(); } void @@ -351,7 +263,6 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild) #endif BucketListIsConsistentWithDatabase::registerInvariant(*this); - AccountSubEntriesCountIsValid::registerInvariant(*this); ConservationOfLumens::registerInvariant(*this); LedgerEntryIsValid::registerInvariant(*this); @@ -399,7 +310,7 @@ ApplicationImpl::resetLedgerState() #endif { auto& lsRoot = getLedgerTxnRoot(); - lsRoot.deleteObjectsModifiedOnOrAfterLedger(0); + lsRoot.deleteOffersModifiedOnOrAfterLedger(0); } } @@ -418,10 +329,7 @@ ApplicationImpl::upgradeToCurrentSchemaAndMaybeRebuildLedger(bool applyBuckets, if (forceRebuild) { auto& ps = getPersistentState(); - for (auto let : xdr::xdr_traits::enum_values()) - { - ps.setRebuildForType(static_cast(let)); - } + ps.setRebuildForOfferTable(); } mDatabase->upgradeToCurrentSchema(); @@ -734,67 +642,35 @@ ApplicationImpl::validateAndLogConfig() "release. Please use sqlite3 for non-ledger state data."); } - if (mConfig.DEPRECATED_SQL_LEDGER_STATE) + auto pageSizeExp = mConfig.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT; + if (pageSizeExp != 0) { - if (mPersistentState->getState(PersistentState::kDBBackend) == - BucketIndex::DB_BACKEND_STATE) + // If the page size is less than 256 bytes, it is essentially + // indexing individual keys, so page size should be set to 0 + // instead. 
+ if (pageSizeExp < 8) { throw std::invalid_argument( - "To downgrade to DEPRECATED_SQL_LEDGER_STATE, run " - "stellar-core new-db."); + "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " + "must be at least 8 or set to 0 for individual entry " + "indexing"); } - CLOG_WARNING( - Bucket, - "SQL for ledger state is enabled. This feature is deprecated! Node " - "may see performance degredation and lose sync with the network."); - } - else - { - if (mConfig.isUsingBucketListDB()) - { - mPersistentState->setState(PersistentState::kDBBackend, - BucketIndex::DB_BACKEND_STATE); - auto pageSizeExp = mConfig.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT; - if (pageSizeExp != 0) - { - // If the page size is less than 256 bytes, it is essentially - // indexing individual keys, so page size should be set to 0 - // instead. - if (pageSizeExp < 8) - { - throw std::invalid_argument( - "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " - "must be at least 8 or set to 0 for individual entry " - "indexing"); - } - - // Check if pageSize will cause overflow - if (pageSizeExp > 31) - { - throw std::invalid_argument( - "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " - "must be less than 32"); - } - } - - CLOG_INFO(Bucket, - "BucketListDB enabled: pageSizeExponent: {} indexCutOff: " - "{}MB, persist indexes: {}", - pageSizeExp, mConfig.BUCKETLIST_DB_INDEX_CUTOFF, - mConfig.isPersistingBucketListDBIndexes()); - } - else + // Check if pageSize will cause overflow + if (pageSizeExp > 31) { - CLOG_WARNING( - Bucket, - "DEPRECATED_SQL_LEDGER_STATE set to false but " - "deprecated SQL ledger state is active. 
To disable deprecated " - "SQL ledger state, " - "MODE_ENABLES_BUCKETLIST must be set."); + throw std::invalid_argument( + "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " + "must be less than 32"); } } + CLOG_INFO(Bucket, + "BucketListDB enabled: pageSizeExponent: {} indexCutOff: " + "{}MB, persist indexes: {}", + pageSizeExp, mConfig.BUCKETLIST_DB_INDEX_CUTOFF, + mConfig.BUCKETLIST_DB_PERSIST_INDEX); + if (mConfig.HTTP_QUERY_PORT != 0) { if (isNetworkedValidator) @@ -810,13 +686,6 @@ ApplicationImpl::validateAndLogConfig() "HTTP_QUERY_PORT must be different from HTTP_PORT"); } - if (!mConfig.isUsingBucketListDB()) - { - throw std::invalid_argument( - "HTTP_QUERY_PORT requires DEPRECATED_SQL_LEDGER_STATE to be " - "false"); - } - if (mConfig.QUERY_THREAD_POOL_SIZE == 0) { throw std::invalid_argument( diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index 2d9d811b53..6898facd6e 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -163,8 +163,7 @@ runApp(Application::pointer app) } bool -applyBucketsForLCL(Application& app, - std::function onlyApply) +applyBucketsForLCL(Application& app) { auto has = app.getLedgerManager().getLastClosedLedgerHAS(); auto lclHash = @@ -180,7 +179,7 @@ applyBucketsForLCL(Application& app, std::map> buckets; auto work = app.getWorkScheduler().scheduleWork( - buckets, has, maxProtocolVersion, onlyApply); + buckets, has, maxProtocolVersion); while (app.getClock().crank(true) && !work->isDone()) ; @@ -188,12 +187,6 @@ applyBucketsForLCL(Application& app, return work->getState() == BasicWork::State::WORK_SUCCESS; } -bool -applyBucketsForLCL(Application& app) -{ - return applyBucketsForLCL(app, [](LedgerEntryType) { return true; }); -} - void httpCommand(std::string const& command, unsigned short port) { diff --git a/src/main/ApplicationUtils.h b/src/main/ApplicationUtils.h index 140626bc7f..ac0848bdb6 100644 --- a/src/main/ApplicationUtils.h +++ b/src/main/ApplicationUtils.h @@ -55,8 +55,6 @@ 
int catchup(Application::pointer app, CatchupConfiguration cc, // Reduild ledger state based on the buckets. Ensure ledger state is properly // reset before calling this function. bool applyBucketsForLCL(Application& app); -bool applyBucketsForLCL(Application& app, - std::function onlyApply); int publish(Application::pointer app); std::string minimalDBForInMemoryMode(Config const& cfg); bool canRebuildInMemoryLedgerFromBuckets(uint32_t startAtLedger, uint32_t lcl); diff --git a/src/main/CommandHandler.cpp b/src/main/CommandHandler.cpp index e2e22dbe44..87e9397361 100644 --- a/src/main/CommandHandler.cpp +++ b/src/main/CommandHandler.cpp @@ -72,8 +72,7 @@ CommandHandler::CommandHandler(Application& app) : mApp(app) app.getClock().getIOContext(), ipStr, mApp.getConfig().HTTP_PORT, httpMaxClient); - if (mApp.getConfig().HTTP_QUERY_PORT && - mApp.getConfig().isUsingBucketListDB()) + if (mApp.getConfig().HTTP_QUERY_PORT) { mQueryServer = std::make_unique( ipStr, mApp.getConfig().HTTP_QUERY_PORT, httpMaxClient, diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp index 0c2a3c717c..d3728c5541 100644 --- a/src/main/CommandLine.cpp +++ b/src/main/CommandLine.cpp @@ -1177,8 +1177,9 @@ runNewDB(CommandLineArgs const& args) }; return runWithHelp(args, - {configurationParser(configOption), - minimalDBParser(minimalForInMemoryMode)}, + { + configurationParser(configOption), + }, [&] { auto cfg = configOption.getConfig(); initializeDatabase(cfg); @@ -1485,9 +1486,8 @@ run(CommandLineArgs const& args) args, {configurationParser(configOption), disableBucketGCParser(disableBucketGC), - metadataOutputStreamParser(stream), inMemoryParser(inMemory), - waitForConsensusParser(waitForConsensus), - startAtLedgerParser(startAtLedger), startAtHashParser(startAtHash)}, + metadataOutputStreamParser(stream), + waitForConsensusParser(waitForConsensus)}, [&] { Config cfg; std::shared_ptr clock; diff --git a/src/main/Config.cpp b/src/main/Config.cpp index 7d290a008d..da8fb98551 100644 
--- a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -157,7 +157,6 @@ Config::Config() : NODE_SEED(SecretKey::random()) CATCHUP_COMPLETE = false; CATCHUP_RECENT = 0; BACKGROUND_OVERLAY_PROCESSING = true; - DEPRECATED_SQL_LEDGER_STATE = false; BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 14; // 2^14 == 16 kb BUCKETLIST_DB_INDEX_CUTOFF = 20; // 20 mb BUCKETLIST_DB_PERSIST_INDEX = true; @@ -1082,16 +1081,20 @@ Config::processConfig(std::shared_ptr t) "is ignored. Please remove from config"); }}, {"DEPRECATED_SQL_LEDGER_STATE", - [&]() { DEPRECATED_SQL_LEDGER_STATE = readBool(item); }}, + [&]() { + CLOG_WARNING( + Bucket, + "DEPRECATED_SQL_LEDGER_STATE is deprecated and " + "ignored. Please remove from config"); + }}, // Still support EXPERIMENTAL_BUCKETLIST_DB* flags for // captive-core for 21.0 release, remove in 21.1 release {"EXPERIMENTAL_BUCKETLIST_DB", [&]() { - DEPRECATED_SQL_LEDGER_STATE = !readBool(item); CLOG_WARNING( Bucket, - "EXPERIMENTAL_BUCKETLIST_DB flag is deprecated, " - "use DEPRECATED_SQL_LEDGER_STATE=false instead."); + "EXPERIMENTAL_BUCKETLIST_DB flag is deprecated. " + "please remove from config"); }}, {"EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT", [&]() { @@ -1812,33 +1815,11 @@ Config::processConfig(std::shared_ptr t) // Validators default to starting the network from local state FORCE_SCP = NODE_IS_VALIDATOR; - // Require either DEPRECATED_SQL_LEDGER_STATE or - // EXPERIMENTAL_BUCKETLIST_DB to be backwards compatible with horizon - // and RPC, but do not allow both. - if (!t->contains("DEPRECATED_SQL_LEDGER_STATE") && - !t->contains("EXPERIMENTAL_BUCKETLIST_DB")) - { - std::string msg = - "Invalid configuration: " - "DEPRECATED_SQL_LEDGER_STATE not set. Default setting is FALSE " - "and is appropriate for most nodes."; - throw std::runtime_error(msg); - } // Only allow one version of all BucketListDB flags, either the // deprecated flag or new flag, but not both. 
- else if (t->contains("DEPRECATED_SQL_LEDGER_STATE") && - t->contains("EXPERIMENTAL_BUCKETLIST_DB")) - { - std::string msg = - "Invalid configuration: EXPERIMENTAL_BUCKETLIST_DB and " - "DEPRECATED_SQL_LEDGER_STATE must not both be set. " - "EXPERIMENTAL_BUCKETLIST_DB is deprecated, use " - "DEPRECATED_SQL_LEDGER_STATE only."; - throw std::runtime_error(msg); - } - else if (t->contains( - "EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT") && - t->contains("BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT")) + if (t->contains( + "EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT") && + t->contains("BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT")) { std::string msg = "Invalid configuration: " @@ -2384,22 +2365,6 @@ Config::modeDoesCatchupWithBucketList() const return MODE_DOES_CATCHUP && MODE_ENABLES_BUCKETLIST; } -bool -Config::isUsingBucketListDB() const -{ - return !DEPRECATED_SQL_LEDGER_STATE -#ifdef BUILD_TESTS - && !MODE_USES_IN_MEMORY_LEDGER -#endif - && MODE_ENABLES_BUCKETLIST; -} - -bool -Config::isPersistingBucketListDBIndexes() const -{ - return isUsingBucketListDB() && BUCKETLIST_DB_PERSIST_INDEX; -} - bool Config::modeStoresAllHistory() const { diff --git a/src/main/Config.h b/src/main/Config.h index 6c0e3dfc5f..0bd69449c1 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -441,10 +441,6 @@ class Config : public std::enable_shared_from_this // configuration) to delay emitting metadata by one ledger. bool EXPERIMENTAL_PRECAUTION_DELAY_META; - // A config parameter that when set uses SQL as the primary - // key-value store for LedgerEntry lookups instead of BucketListDB. - bool DEPRECATED_SQL_LEDGER_STATE; - // Page size exponent used by BucketIndex when indexing ranges of // BucketEntry's. If set to 0, BucketEntry's are individually indexed. 
// Otherwise, pageSize == @@ -788,8 +784,6 @@ class Config : public std::enable_shared_from_this std::chrono::seconds getExpectedLedgerCloseTime() const; bool modeDoesCatchupWithBucketList() const; - bool isUsingBucketListDB() const; - bool isUsingBackgroundEviction() const; bool isPersistingBucketListDBIndexes() const; bool modeStoresAllHistory() const; bool modeStoresAnyHistory() const; diff --git a/src/main/PersistentState.cpp b/src/main/PersistentState.cpp index 150d3f62ab..ecb7c12eaa 100644 --- a/src/main/PersistentState.cpp +++ b/src/main/PersistentState.cpp @@ -149,32 +149,24 @@ PersistentState::setSCPStateV1ForSlot( } bool -PersistentState::shouldRebuildForType(LedgerEntryType let) +PersistentState::shouldRebuildForOfferTable() { ZoneScoped; - return !getFromDb(getStoreStateName(kRebuildLedger, let)).empty(); + return !getFromDb(getStoreStateName(kRebuildLedger, OFFER)).empty(); } void -PersistentState::clearRebuildForType(LedgerEntryType let) +PersistentState::clearRebuildForOfferTable() { ZoneScoped; - updateDb(getStoreStateName(kRebuildLedger, let), ""); + updateDb(getStoreStateName(kRebuildLedger, OFFER), ""); } void -PersistentState::setRebuildForType(LedgerEntryType let) +PersistentState::setRebuildForOfferTable() { ZoneScoped; - - // Only allow rebuilds for offer table if BucketListDB enabled, other tables - // don't exist - if (mApp.getConfig().isUsingBucketListDB() && let != OFFER) - { - return; - } - - updateDb(getStoreStateName(kRebuildLedger, let), "1"); + updateDb(getStoreStateName(kRebuildLedger, OFFER), "1"); } void diff --git a/src/main/PersistentState.h b/src/main/PersistentState.h index c22cd59e57..7dc359ae2e 100644 --- a/src/main/PersistentState.h +++ b/src/main/PersistentState.h @@ -46,9 +46,9 @@ class PersistentState setSCPStateV1ForSlot(uint64 slot, std::string const& value, std::unordered_map const& txSets); - bool shouldRebuildForType(LedgerEntryType let); - void clearRebuildForType(LedgerEntryType let); - void 
setRebuildForType(LedgerEntryType let); + bool shouldRebuildForOfferTable(); + void clearRebuildForOfferTable(); + void setRebuildForOfferTable(); bool hasTxSet(Hash const& txSetHash); void deleteTxSets(std::unordered_set hashesToDelete); diff --git a/src/main/test/ConfigTests.cpp b/src/main/test/ConfigTests.cpp index 2b1dcc559f..e930723d17 100644 --- a/src/main/test/ConfigTests.cpp +++ b/src/main/test/ConfigTests.cpp @@ -286,7 +286,6 @@ TEST_CASE("bad validators configs", "[config]") NODE_SEED="SA7FGJMMUIHNE3ZPI2UO5I632A7O5FBAZTXFAIEVFA4DSSGLHXACLAIT a3" {NODE_HOME_DOMAIN} NODE_IS_VALIDATOR=true -DEPRECATED_SQL_LEDGER_STATE=true ############################ # list of HOME_DOMAINS @@ -473,9 +472,7 @@ TEST_CASE("nesting level", "[config]") auto secretKey = SecretKey::fromSeed(hash); return secretKey.getStrKeyPublic(); }; - std::string configNesting = - "DEPRECATED_SQL_LEDGER_STATE=true\n" // Required for all configs - "UNSAFE_QUORUM=true"; + std::string configNesting = "UNSAFE_QUORUM=true"; std::string quorumSetNumber = ""; std::string quorumSetTemplate = R"( @@ -536,7 +533,6 @@ TEST_CASE("operation filter configuration", "[config]") }; std::stringstream ss; - ss << "DEPRECATED_SQL_LEDGER_STATE=true\n"; // required for all configs ss << "UNSAFE_QUORUM=true\n"; toConfigStr(vals, ss); ss << "\n[QUORUM_SET]\n"; diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp index 5cc05ee820..657e06807f 100644 --- a/src/simulation/test/LoadGeneratorTests.cpp +++ b/src/simulation/test/LoadGeneratorTests.cpp @@ -26,7 +26,6 @@ TEST_CASE("generate load in protocol 1") auto cfg = getTestConfig(i); cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 5000; cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = 1; - cfg.DEPRECATED_SQL_LEDGER_STATE = false; return cfg; }); @@ -892,8 +891,6 @@ TEST_CASE("apply load", "[loadgen][applyload]") cfg.APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES = 8198; cfg.APPLY_LOAD_MAX_TX_COUNT = 50; - REQUIRE(cfg.isUsingBucketListDB()); - 
VirtualClock clock(VirtualClock::REAL_TIME); auto app = createTestApplication(clock, cfg); diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp index e752ad6a5b..e53eed53c4 100644 --- a/src/test/TestUtils.cpp +++ b/src/test/TestUtils.cpp @@ -282,7 +282,7 @@ modifySorobanNetworkConfig(Application& app, // Need to close a ledger following call to `addBatch` from config upgrade // to refresh cached state - if (app.getConfig().isUsingBucketListDB()) + if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER) { txtest::closeLedger(app); } diff --git a/src/test/TxTests.cpp b/src/test/TxTests.cpp index b9a69a8f1b..818c28400c 100644 --- a/src/test/TxTests.cpp +++ b/src/test/TxTests.cpp @@ -389,9 +389,13 @@ checkTransaction(TransactionTestFrame& txFrame, Application& app) void applyTx(TransactionTestFramePtr const& tx, Application& app, bool checkSeqNum) { + if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER) + { + applyCheck(tx, app, checkSeqNum); + } // We cannot commit directly to the DB if running BucketListDB, so close a // ledger with the TX instead - if (app.getConfig().isUsingBucketListDB()) + else { auto resultSet = closeLedger(app, {tx}); @@ -406,10 +410,6 @@ applyTx(TransactionTestFramePtr const& tx, Application& app, bool checkSeqNum) REQUIRE(meta.size() == 1); recordOrCheckGlobalTestTxMetadata(meta.back().getXDR()); } - else - { - applyCheck(tx, app, checkSeqNum); - } throwIf(tx->getResult()); checkTransaction(*tx, app); From 70b2228969edccb69d78f3f176c4d7ed257d1e55 Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Wed, 9 Oct 2024 19:32:00 -0700 Subject: [PATCH 08/17] Support upgrade-db for BucketListDB --- src/database/Database.cpp | 45 +++++++++++++++++++++++++++++++++++++++ src/database/Database.h | 1 + 2 files changed, 46 insertions(+) diff --git a/src/database/Database.cpp b/src/database/Database.cpp index ba8fe571ee..f61c2b5c9a 100644 --- a/src/database/Database.cpp +++ b/src/database/Database.cpp @@ -254,6 +254,7 @@ Database::upgradeToCurrentSchema() // Tx 
meta column no longer supported dropTxMetaIfExists(); + maybeUpgradeToBucketListDB(); CLOG_INFO(Database, "DB schema is in current version"); releaseAssert(vers == SCHEMA_VERSION); @@ -293,6 +294,50 @@ Database::dropTxMetaIfExists() } } +void +Database::maybeUpgradeToBucketListDB() +{ + if (mApp.getPersistentState().getState(PersistentState::kDBBackend) != + BucketIndex::DB_BACKEND_STATE) + { + CLOG_INFO(Database, "Upgrading to BucketListDB"); + + // Drop all LedgerEntry tables except for offers + CLOG_INFO(Database, "Dropping table accounts"); + getSession() << "DROP TABLE IF EXISTS accounts;"; + + CLOG_INFO(Database, "Dropping table signers"); + getSession() << "DROP TABLE IF EXISTS signers;"; + + CLOG_INFO(Database, "Dropping table claimablebalance"); + getSession() << "DROP TABLE IF EXISTS claimablebalance;"; + + CLOG_INFO(Database, "Dropping table configsettings"); + getSession() << "DROP TABLE IF EXISTS configsettings;"; + + CLOG_INFO(Database, "Dropping table contractcode"); + getSession() << "DROP TABLE IF EXISTS contractcode;"; + + CLOG_INFO(Database, "Dropping table contractdata"); + getSession() << "DROP TABLE IF EXISTS contractdata;"; + + CLOG_INFO(Database, "Dropping table accountdata"); + getSession() << "DROP TABLE IF EXISTS accountdata;"; + + CLOG_INFO(Database, "Dropping table liquiditypool"); + getSession() << "DROP TABLE IF EXISTS liquiditypool;"; + + CLOG_INFO(Database, "Dropping table trustlines"); + getSession() << "DROP TABLE IF EXISTS trustlines;"; + + CLOG_INFO(Database, "Dropping table ttl"); + getSession() << "DROP TABLE IF EXISTS ttl;"; + + mApp.getPersistentState().setState(PersistentState::kDBBackend, + BucketIndex::DB_BACKEND_STATE); + } +} + void Database::putSchemaVersion(unsigned long vers) { diff --git a/src/database/Database.h b/src/database/Database.h index e3ad43b214..73540c2884 100644 --- a/src/database/Database.h +++ b/src/database/Database.h @@ -174,6 +174,7 @@ class Database : NonMovableOrCopyable void 
upgradeToCurrentSchema(); void dropTxMetaIfExists(); + void maybeUpgradeToBucketListDB(); // Access the underlying SOCI session object soci::session& getSession(); From a12e0d772dfc2d3bdf766c463157b8f64e20529d Mon Sep 17 00:00:00 2001 From: marta-lokhova Date: Mon, 16 Dec 2024 17:27:34 -0800 Subject: [PATCH 09/17] Add loadgen mode --- docs/software/commands.md | 3 ++- src/main/CommandHandler.cpp | 13 +++++++++++-- src/simulation/LoadGenerator.cpp | 17 +++++++++++++++++ src/simulation/LoadGenerator.h | 2 ++ src/simulation/test/LoadGeneratorTests.cpp | 20 ++++++++++++++++++++ 5 files changed, 52 insertions(+), 3 deletions(-) diff --git a/docs/software/commands.md b/docs/software/commands.md index db17ba6a83..faec8d2ad3 100644 --- a/docs/software/commands.md +++ b/docs/software/commands.md @@ -471,7 +471,7 @@ this survey mechanism, just set `SURVEYOR_KEYS` to `$self` or a bogus key ### The following HTTP commands are exposed on test instances * **generateload** `generateload[?mode= - (create|pay|pretend|mixed_classic|soroban_upload|soroban_invoke_setup|soroban_invoke|upgrade_setup|create_upgrade|mixed_classic_soroban)&accounts=N&offset=K&txs=M&txrate=R&spikesize=S&spikeinterval=I&maxfeerate=F&skiplowfeetxs=(0|1)&dextxpercent=D&minpercentsuccess=S&instances=Y&wasms=Z&payweight=P&sorobanuploadweight=Q&sorobaninvokeweight=R]` + (create|pay|pretend|mixed_classic|soroban_upload|soroban_invoke_setup|soroban_invoke|upgrade_setup|create_upgrade|mixed_classic_soroban|stop)&accounts=N&offset=K&txs=M&txrate=R&spikesize=S&spikeinterval=I&maxfeerate=F&skiplowfeetxs=(0|1)&dextxpercent=D&minpercentsuccess=S&instances=Y&wasms=Z&payweight=P&sorobanuploadweight=Q&sorobaninvokeweight=R]` Artificially generate load for testing; must be used with `ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING` set to true. 
@@ -532,6 +532,7 @@ this survey mechanism, just set `SURVEYOR_KEYS` to `$self` or a bogus key `soroban_upload`, and `soroban_invoke` load with the likelihood of any generated transaction falling into each mode being determined by the mode's weight divided by the sum of all weights. + * `stop` mode stops any existing load generation run and marks it as "failed". Non-`create` load generation makes use of the additional parameters: * when a nonzero `spikeinterval` is given, a spike will occur every diff --git a/src/main/CommandHandler.cpp b/src/main/CommandHandler.cpp index e2e22dbe44..29d6965e82 100644 --- a/src/main/CommandHandler.cpp +++ b/src/main/CommandHandler.cpp @@ -1263,9 +1263,18 @@ CommandHandler::generateLoad(std::string const& params, std::string& retStr) { std::map map; http::server::server::parseParams(params, map); + auto modeStr = + parseOptionalParamOrDefault(map, "mode", "create"); + // First check if a current run needs to be stopped + if (modeStr == "stop") + { + mApp.getLoadGenerator().stop(); + retStr = "Stopped load generation"; + return; + } + GeneratedLoadConfig cfg; - cfg.mode = LoadGenerator::getMode( - parseOptionalParamOrDefault(map, "mode", "create")); + cfg.mode = LoadGenerator::getMode(modeStr); cfg.nAccounts = parseOptionalParamOrDefault(map, "accounts", 1000); diff --git a/src/simulation/LoadGenerator.cpp b/src/simulation/LoadGenerator.cpp index bef7d70f91..94063d83a8 100644 --- a/src/simulation/LoadGenerator.cpp +++ b/src/simulation/LoadGenerator.cpp @@ -276,6 +276,23 @@ LoadGenerator::resetSorobanState() mContactOverheadBytes = 0; } +void +LoadGenerator::stop() +{ + ZoneScoped; + if (mStarted) + { + // Some residual transactions might still be pending in consensus, but + // that should be harmless. 
+ if (mLoadTimer) + { + mLoadTimer->cancel(); + } + mLoadgenFail.Mark(); + reset(); + } +} + void LoadGenerator::start(GeneratedLoadConfig& cfg) { diff --git a/src/simulation/LoadGenerator.h b/src/simulation/LoadGenerator.h index 0fd898176a..2f5394dfab 100644 --- a/src/simulation/LoadGenerator.h +++ b/src/simulation/LoadGenerator.h @@ -205,6 +205,8 @@ class LoadGenerator return mContactOverheadBytes; } + void stop(); + private: struct TxMetrics { diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp index 5cc05ee820..8025fac897 100644 --- a/src/simulation/test/LoadGeneratorTests.cpp +++ b/src/simulation/test/LoadGeneratorTests.cpp @@ -124,6 +124,26 @@ TEST_CASE("generate load with unique accounts", "[loadgen]") }, 10 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, false); } + SECTION("stop loadgen") + { + loadGen.generateLoad(GeneratedLoadConfig::createAccountsLoad( + /* nAccounts */ 10000, + /* txRate */ 1)); + simulation->crankForAtLeast(std::chrono::seconds(10), false); + auto& acc = app.getMetrics().NewMeter({"loadgen", "account", "created"}, + "account"); + auto numAccounts = acc.count(); + REQUIRE(app.getMetrics() + .NewMeter({"loadgen", "run", "failed"}, "run") + .count() == 0); + loadGen.stop(); + REQUIRE(app.getMetrics() + .NewMeter({"loadgen", "run", "failed"}, "run") + .count() == 1); + // No new txs submitted + simulation->crankForAtLeast(std::chrono::seconds(10), false); + REQUIRE(acc.count() == numAccounts); + } } TEST_CASE("modify soroban network config", "[loadgen][soroban]") From 09bb5180b27539654ec268da170850bad6ed5d6c Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Wed, 9 Oct 2024 20:26:24 -0700 Subject: [PATCH 10/17] Removed LedgerEntry SQL code --- src/bucket/test/BucketManagerTests.cpp | 170 ++--- src/catchup/ApplyBucketsWork.cpp | 3 +- src/database/Database.cpp | 36 - src/ledger/LedgerTxn.cpp | 148 +--- src/ledger/LedgerTxn.h | 60 +- src/ledger/LedgerTxnAccountSQL.cpp | 679 ------------------ 
src/ledger/LedgerTxnClaimableBalanceSQL.cpp | 374 ---------- src/ledger/LedgerTxnConfigSettingSQL.cpp | 295 -------- src/ledger/LedgerTxnContractCodeSQL.cpp | 387 ---------- src/ledger/LedgerTxnContractDataSQL.cpp | 462 ------------ src/ledger/LedgerTxnDataSQL.cpp | 508 ------------- src/ledger/LedgerTxnImpl.h | 80 +-- src/ledger/LedgerTxnLiquidityPoolSQL.cpp | 420 ----------- src/ledger/LedgerTxnOfferSQL.cpp | 71 +- src/ledger/LedgerTxnTTLSQL.cpp | 382 ---------- src/ledger/LedgerTxnTrustLineSQL.cpp | 522 -------------- src/ledger/test/InMemoryLedgerTxn.cpp | 4 +- src/ledger/test/InMemoryLedgerTxn.h | 2 +- src/ledger/test/InMemoryLedgerTxnRoot.cpp | 47 +- src/ledger/test/InMemoryLedgerTxnRoot.h | 11 +- src/main/ApplicationImpl.cpp | 20 +- src/main/CommandLine.cpp | 10 +- src/main/Config.cpp | 5 +- src/main/PersistentState.h | 1 + .../InvokeHostFunctionTests.json | 4 +- .../InvokeHostFunctionTests.json | 4 +- 26 files changed, 145 insertions(+), 4560 deletions(-) delete mode 100644 src/ledger/LedgerTxnAccountSQL.cpp delete mode 100644 src/ledger/LedgerTxnClaimableBalanceSQL.cpp delete mode 100644 src/ledger/LedgerTxnConfigSettingSQL.cpp delete mode 100644 src/ledger/LedgerTxnContractCodeSQL.cpp delete mode 100644 src/ledger/LedgerTxnContractDataSQL.cpp delete mode 100644 src/ledger/LedgerTxnDataSQL.cpp delete mode 100644 src/ledger/LedgerTxnLiquidityPoolSQL.cpp delete mode 100644 src/ledger/LedgerTxnTTLSQL.cpp delete mode 100644 src/ledger/LedgerTxnTrustLineSQL.cpp diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index 745a2ad955..0a92aa29f3 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -189,112 +189,90 @@ TEST_CASE("skip list", "[bucket][bucketmanager]") TEST_CASE_VERSIONS("bucketmanager ownership", "[bucket][bucketmanager]") { - auto test = [&](bool bucketListDB) { - VirtualClock clock; - Config cfg = getTestConfig(); - - // Make sure all Buckets serialize indexes to 
disk for test - cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0; - cfg.MANUAL_CLOSE = false; - - if (bucketListDB) - { - // Enable BucketListDB with persistent indexes - cfg.NODE_IS_VALIDATOR = false; - cfg.FORCE_SCP = false; - } - - for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { - Application::pointer app = createTestApplication(clock, cfg); - - std::vector live( - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 10)); - std::vector dead{}; - - std::shared_ptr b1; - - { - std::shared_ptr b2 = LiveBucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, live, - dead, /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - b1 = b2; - - // Bucket is referenced by b1, b2 and the BucketManager. - CHECK(b1.use_count() == 3); - - std::shared_ptr b3 = LiveBucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, live, - dead, /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - std::shared_ptr b4 = LiveBucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, live, - dead, /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - // Bucket is referenced by b1, b2, b3, b4 and the BucketManager. 
- CHECK(b1.use_count() == 5); - } - - // Take pointer by reference to not mess up use_count() - auto dropBucket = [&](std::shared_ptr& b) { - std::string filename = b->getFilename().string(); - std::string indexFilename = - app->getBucketManager().bucketIndexFilename(b->getHash()); - CHECK(fs::exists(filename)); - if (bucketListDB) - { - CHECK(fs::exists(indexFilename)); - } + VirtualClock clock; + Config cfg = getTestConfig(); - b.reset(); - app->getBucketManager().forgetUnreferencedBuckets(); - CHECK(!fs::exists(filename)); - CHECK(!fs::exists(indexFilename)); - }; + // Make sure all Buckets serialize indexes to disk for test + cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0; + cfg.MANUAL_CLOSE = false; - // Bucket is now only referenced by b1 and the BucketManager. - CHECK(b1.use_count() == 2); + for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { + Application::pointer app = createTestApplication(clock, cfg); - // Drop bucket ourselves then purge bucketManager. - dropBucket(b1); + std::vector live( + LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( + {CONFIG_SETTING}, 10)); + std::vector dead{}; - // Try adding a bucket to the BucketManager's bucketlist - auto& bl = app->getBucketManager().getLiveBucketList(); - bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); - clearFutures(app, bl); - b1 = bl.getLevel(0).getCurr(); + std::shared_ptr b1; - // Bucket should be referenced by bucketlist itself, BucketManager - // cache and b1. - CHECK(b1.use_count() == 3); + { + std::shared_ptr b2 = LiveBucket::fresh( + app->getBucketManager(), getAppLedgerVersion(app), {}, live, + dead, /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + b1 = b2; - // This shouldn't change if we forget unreferenced buckets since - // it's referenced by bucketlist. - app->getBucketManager().forgetUnreferencedBuckets(); + // Bucket is referenced by b1, b2 and the BucketManager. 
CHECK(b1.use_count() == 3); - // But if we mutate the curr bucket of the bucketlist, it should. - live[0] = LedgerTestUtils::generateValidLedgerEntryWithExclusions( - {CONFIG_SETTING}); - bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); - clearFutures(app, bl); - CHECK(b1.use_count() == 2); - - // Drop it again. - dropBucket(b1); - }); - }; + std::shared_ptr b3 = LiveBucket::fresh( + app->getBucketManager(), getAppLedgerVersion(app), {}, live, + dead, /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + std::shared_ptr b4 = LiveBucket::fresh( + app->getBucketManager(), getAppLedgerVersion(app), {}, live, + dead, /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + // Bucket is referenced by b1, b2, b3, b4 and the BucketManager. + CHECK(b1.use_count() == 5); + } - SECTION("BucketListDB") - { - test(true); - } + // Take pointer by reference to not mess up use_count() + auto dropBucket = [&](std::shared_ptr& b) { + std::string filename = b->getFilename().string(); + std::string indexFilename = + app->getBucketManager().bucketIndexFilename(b->getHash()); + CHECK(fs::exists(filename)); + CHECK(fs::exists(indexFilename)); - SECTION("SQL") - { - test(false); - } + b.reset(); + app->getBucketManager().forgetUnreferencedBuckets(); + CHECK(!fs::exists(filename)); + CHECK(!fs::exists(indexFilename)); + }; + + // Bucket is now only referenced by b1 and the BucketManager. + CHECK(b1.use_count() == 2); + + // Drop bucket ourselves then purge bucketManager. + dropBucket(b1); + + // Try adding a bucket to the BucketManager's bucketlist + auto& bl = app->getBucketManager().getLiveBucketList(); + bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); + clearFutures(app, bl); + b1 = bl.getLevel(0).getCurr(); + + // Bucket should be referenced by bucketlist itself, BucketManager + // cache and b1. 
+ CHECK(b1.use_count() == 3); + + // This shouldn't change if we forget unreferenced buckets since + // it's referenced by bucketlist. + app->getBucketManager().forgetUnreferencedBuckets(); + CHECK(b1.use_count() == 3); + + // But if we mutate the curr bucket of the bucketlist, it should. + live[0] = LedgerTestUtils::generateValidLedgerEntryWithExclusions( + {CONFIG_SETTING}); + bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); + clearFutures(app, bl); + CHECK(b1.use_count() == 2); + + // Drop it again. + dropBucket(b1); + }); } TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]") diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp index 099f10d70d..8358384018 100644 --- a/src/catchup/ApplyBucketsWork.cpp +++ b/src/catchup/ApplyBucketsWork.cpp @@ -104,7 +104,8 @@ ApplyBucketsWork::doReset() // The current size of this set is 1.6 million during BucketApply // (as of 12/20/23). There's not a great way to estimate this, so // reserving with some extra wiggle room - mSeenKeys.reserve(2'000'000); + static const size_t estimatedOfferKeyCount = 2'000'000; + mSeenKeys.reserve(estimatedOfferKeyCount); auto addBucket = [this](std::shared_ptr const& bucket) { if (bucket->getSize() > 0) diff --git a/src/database/Database.cpp b/src/database/Database.cpp index f61c2b5c9a..73cccdb6f6 100644 --- a/src/database/Database.cpp +++ b/src/database/Database.cpp @@ -252,48 +252,12 @@ Database::upgradeToCurrentSchema() putSchemaVersion(vers); } - // Tx meta column no longer supported - dropTxMetaIfExists(); maybeUpgradeToBucketListDB(); CLOG_INFO(Database, "DB schema is in current version"); releaseAssert(vers == SCHEMA_VERSION); } -void -Database::dropTxMetaIfExists() -{ - int txMetaExists{}; - std::string selectStr; - if (isSqlite()) - { - selectStr = "SELECT EXISTS (" - "SELECT 1 " - "FROM pragma_table_info('txhistory') " - "WHERE name = 'txmeta');"; - } - else - { - selectStr = "SELECT EXISTS (" - "SELECT 1 " - "FROM 
information_schema.columns " - "WHERE " - "table_name = 'txhistory' AND " - "column_name = 'txmeta');"; - } - - auto& st = getPreparedStatement(selectStr).statement(); - st.exchange(soci::into(txMetaExists)); - st.define_and_bind(); - st.execute(true); - - if (txMetaExists) - { - CLOG_INFO(Database, "Dropping txmeta column from txhistory table"); - getSession() << "ALTER TABLE txhistory DROP COLUMN txmeta;"; - } -} - void Database::maybeUpgradeToBucketListDB() { diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp index cf322d447d..f2d6c1e79d 100644 --- a/src/ledger/LedgerTxn.cpp +++ b/src/ledger/LedgerTxn.cpp @@ -2015,66 +2015,11 @@ LedgerTxn::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const } void -LedgerTxn::dropAccounts(bool rebuild) -{ - throw std::runtime_error("called dropAccounts on non-root LedgerTxn"); -} - -void -LedgerTxn::dropData(bool rebuild) -{ - throw std::runtime_error("called dropData on non-root LedgerTxn"); -} - -void -LedgerTxn::dropOffers(bool rebuild) +LedgerTxn::dropOffers() { throw std::runtime_error("called dropOffers on non-root LedgerTxn"); } -void -LedgerTxn::dropTrustLines(bool rebuild) -{ - throw std::runtime_error("called dropTrustLines on non-root LedgerTxn"); -} - -void -LedgerTxn::dropClaimableBalances(bool rebuild) -{ - throw std::runtime_error( - "called dropClaimableBalances on non-root LedgerTxn"); -} - -void -LedgerTxn::dropLiquidityPools(bool rebuild) -{ - throw std::runtime_error("called dropLiquidityPools on non-root LedgerTxn"); -} - -void -LedgerTxn::dropContractData(bool rebuild) -{ - throw std::runtime_error("called dropContractData on non-root LedgerTxn"); -} - -void -LedgerTxn::dropContractCode(bool rebuild) -{ - throw std::runtime_error("called dropContractCode on non-root LedgerTxn"); -} - -void -LedgerTxn::dropConfigSettings(bool rebuild) -{ - throw std::runtime_error("called dropConfigSettings on non-root LedgerTxn"); -} - -void -LedgerTxn::dropTTL(bool rebuild) -{ - throw 
std::runtime_error("called dropTTL on non-root LedgerTxn"); -} - double LedgerTxn::getPrefetchHitRate() const { @@ -2717,36 +2662,6 @@ LedgerTxnRoot::Impl::commitChild(EntryIterator iter, mPrefetchMisses = 0; } -std::string -LedgerTxnRoot::Impl::tableFromLedgerEntryType(LedgerEntryType let) -{ - switch (let) - { - case ACCOUNT: - return "accounts"; - case DATA: - return "accountdata"; - case OFFER: - return "offers"; - case TRUSTLINE: - return "trustlines"; - case CLAIMABLE_BALANCE: - return "claimablebalance"; - case LIQUIDITY_POOL: - return "liquiditypool"; - case CONTRACT_DATA: - return "contractdata"; - case CONTRACT_CODE: - return "contractcode"; - case CONFIG_SETTING: - return "configsettings"; - case TTL: - return "ttl"; - default: - throw std::runtime_error("Unknown ledger entry type"); - } -} - uint64_t LedgerTxnRoot::countOffers(LedgerRange const& ledgers) const { @@ -2783,69 +2698,14 @@ LedgerTxnRoot::Impl::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const mEntryCache.clear(); mBestOffers.clear(); - std::string query = "DELETE FROM " + tableFromLedgerEntryType(OFFER) + - " WHERE lastmodified >= :v1"; + std::string query = "DELETE FROM offers WHERE lastmodified >= :v1"; mApp.getDatabase().getSession() << query, use(ledger); } void -LedgerTxnRoot::dropAccounts(bool rebuild) -{ - mImpl->dropAccounts(rebuild); -} - -void -LedgerTxnRoot::dropData(bool rebuild) -{ - mImpl->dropData(rebuild); -} - -void -LedgerTxnRoot::dropOffers(bool rebuild) -{ - mImpl->dropOffers(rebuild); -} - -void -LedgerTxnRoot::dropTrustLines(bool rebuild) -{ - mImpl->dropTrustLines(rebuild); -} - -void -LedgerTxnRoot::dropClaimableBalances(bool rebuild) -{ - mImpl->dropClaimableBalances(rebuild); -} - -void -LedgerTxnRoot::dropLiquidityPools(bool rebuild) -{ - mImpl->dropLiquidityPools(rebuild); -} - -void -LedgerTxnRoot::dropContractData(bool rebuild) -{ - mImpl->dropContractData(rebuild); -} - -void -LedgerTxnRoot::dropContractCode(bool rebuild) -{ - 
mImpl->dropContractCode(rebuild); -} - -void -LedgerTxnRoot::dropConfigSettings(bool rebuild) -{ - mImpl->dropConfigSettings(rebuild); -} - -void -LedgerTxnRoot::dropTTL(bool rebuild) +LedgerTxnRoot::dropOffers() { - mImpl->dropTTL(rebuild); + mImpl->dropOffers(); } uint32_t diff --git a/src/ledger/LedgerTxn.h b/src/ledger/LedgerTxn.h index 1c1cd4385b..a89ac6bac8 100644 --- a/src/ledger/LedgerTxn.h +++ b/src/ledger/LedgerTxn.h @@ -474,45 +474,9 @@ class AbstractLedgerTxnParent // when called on anything other than a (real or stub) root LedgerTxn. virtual void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const = 0; - // Delete all account ledger entries in the database. Will throw when called - // on anything other than a (real or stub) root LedgerTxn. - virtual void dropAccounts(bool rebuild) = 0; - - // Delete all account-data ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropData(bool rebuild) = 0; - // Delete all offer ledger entries. Will throw when called on anything other // than a (real or stub) root LedgerTxn. - virtual void dropOffers(bool rebuild) = 0; - - // Delete all trustline ledger entries. Will throw when called on anything - // other than a (real or stub) root LedgerTxn. - virtual void dropTrustLines(bool rebuild) = 0; - - // Delete all claimable balance ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropClaimableBalances(bool rebuild) = 0; - - // Delete all liquidity pool ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropLiquidityPools(bool rebuild) = 0; - - // Delete all contract data ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropContractData(bool rebuild) = 0; - - // Delete all contract code ledger entries. 
Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropContractCode(bool rebuild) = 0; - - // Delete all config setting ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropConfigSettings(bool rebuild) = 0; - - // Delete all ttl ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropTTL(bool rebuild) = 0; + virtual void dropOffers() = 0; // Return the current cache hit rate for prefetched ledger entries, as a // fraction from 0.0 to 1.0. Will throw when called on anything other than a @@ -813,16 +777,7 @@ class LedgerTxn : public AbstractLedgerTxn uint64_t countOffers(LedgerRange const& ledgers) const override; void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; - void dropAccounts(bool rebuild) override; - void dropData(bool rebuild) override; - void dropOffers(bool rebuild) override; - void dropTrustLines(bool rebuild) override; - void dropClaimableBalances(bool rebuild) override; - void dropLiquidityPools(bool rebuild) override; - void dropContractData(bool rebuild) override; - void dropContractCode(bool rebuild) override; - void dropConfigSettings(bool rebuild) override; - void dropTTL(bool rebuild) override; + void dropOffers() override; double getPrefetchHitRate() const override; uint32_t prefetchClassic(UnorderedSet const& keys) override; @@ -877,16 +832,7 @@ class LedgerTxnRoot : public AbstractLedgerTxnParent void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; - void dropAccounts(bool rebuild) override; - void dropData(bool rebuild) override; - void dropOffers(bool rebuild) override; - void dropTrustLines(bool rebuild) override; - void dropClaimableBalances(bool rebuild) override; - void dropLiquidityPools(bool rebuild) override; - void dropContractData(bool rebuild) override; - void dropContractCode(bool rebuild) override; - void 
dropConfigSettings(bool rebuild) override; - void dropTTL(bool rebuild) override; + void dropOffers() override; #ifdef BUILD_TESTS void resetForFuzzer() override; diff --git a/src/ledger/LedgerTxnAccountSQL.cpp b/src/ledger/LedgerTxnAccountSQL.cpp deleted file mode 100644 index 3313917893..0000000000 --- a/src/ledger/LedgerTxnAccountSQL.cpp +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright 2018 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "crypto/KeyUtils.h" -#include "crypto/SecretKey.h" -#include "crypto/SignerKey.h" -#include "database/Database.h" -#include "database/DatabaseTypeSpecificOperation.h" -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "main/Application.h" -#include "util/Decoder.h" -#include "util/GlobalChecks.h" -#include "util/Logging.h" -#include "util/XDROperators.h" -#include "util/types.h" -#include "xdrpp/marshal.h" -#include - -namespace stellar -{ - -std::shared_ptr -LedgerTxnRoot::Impl::loadAccount(LedgerKey const& key) const -{ - ZoneScoped; - std::string actIDStrKey = KeyUtils::toStrKey(key.account().accountID); - - std::string inflationDest, homeDomain, thresholds, signers; - soci::indicator inflationDestInd, signersInd; - std::string extensionStr; - soci::indicator extensionInd; - std::string ledgerExtStr; - soci::indicator ledgerExtInd; - - LedgerEntry le; - le.data.type(ACCOUNT); - auto& account = le.data.account(); - - auto prep = mApp.getDatabase().getPreparedStatement( - "SELECT balance, seqnum, numsubentries, " - "inflationdest, homedomain, thresholds, " - "flags, lastmodified, " - "signers, extension, " - "ledgerext FROM accounts WHERE accountid=:v1"); - auto& st = prep.statement(); - st.exchange(soci::into(account.balance)); - st.exchange(soci::into(account.seqNum)); - st.exchange(soci::into(account.numSubEntries)); - 
st.exchange(soci::into(inflationDest, inflationDestInd)); - st.exchange(soci::into(homeDomain)); - st.exchange(soci::into(thresholds)); - st.exchange(soci::into(account.flags)); - st.exchange(soci::into(le.lastModifiedLedgerSeq)); - st.exchange(soci::into(signers, signersInd)); - st.exchange(soci::into(extensionStr, extensionInd)); - st.exchange(soci::into(ledgerExtStr, ledgerExtInd)); - st.exchange(soci::use(actIDStrKey)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("account"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - account.accountID = key.account().accountID; - decoder::decode_b64(homeDomain, account.homeDomain); - - bn::decode_b64(thresholds.begin(), thresholds.end(), - account.thresholds.begin()); - - if (inflationDestInd == soci::i_ok) - { - account.inflationDest.activate() = - KeyUtils::fromStrKey(inflationDest); - } - - if (signersInd == soci::i_ok) - { - std::vector signersOpaque; - decoder::decode_b64(signers, signersOpaque); - xdr::xdr_from_opaque(signersOpaque, account.signers); - releaseAssert( - std::adjacent_find(account.signers.begin(), account.signers.end(), - [](Signer const& lhs, Signer const& rhs) { - return !(lhs.key < rhs.key); - }) == account.signers.end()); - } - - decodeOpaqueXDR(extensionStr, extensionInd, account.ext); - - decodeOpaqueXDR(ledgerExtStr, ledgerExtInd, le.ext); - - return std::make_shared(std::move(le)); -} - -std::vector -LedgerTxnRoot::Impl::loadInflationWinners(size_t maxWinners, - int64_t minBalance) const -{ - InflationWinner w; - std::string inflationDest; - - auto prep = mApp.getDatabase().getPreparedStatement( - "SELECT sum(balance) AS votes, inflationdest" - " FROM accounts WHERE inflationdest IS NOT NULL" - " AND balance >= 1000000000 GROUP BY inflationdest" - " ORDER BY votes DESC, inflationdest DESC LIMIT :lim"); - auto& st = prep.statement(); - st.exchange(soci::into(w.votes)); - st.exchange(soci::into(inflationDest)); - 
st.exchange(soci::use(maxWinners)); - st.define_and_bind(); - st.execute(true); - - std::vector winners; - while (st.got_data()) - { - w.accountID = KeyUtils::fromStrKey(inflationDest); - if (w.votes < minBalance) - { - break; - } - winners.push_back(w); - st.fetch(); - } - return winners; -} - -class BulkUpsertAccountsOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - std::vector mAccountIDs; - std::vector mBalances; - std::vector mSeqNums; - std::vector mSubEntryNums; - std::vector mInflationDests; - std::vector mInflationDestInds; - std::vector mFlags; - std::vector mHomeDomains; - std::vector mThresholds; - std::vector mSigners; - std::vector mSignerInds; - std::vector mLastModifieds; - std::vector mExtensions; - std::vector mExtensionInds; - std::vector mLedgerExtensions; - - public: - BulkUpsertAccountsOperation(Database& DB, - std::vector const& entries) - : mDB(DB) - { - mAccountIDs.reserve(entries.size()); - mBalances.reserve(entries.size()); - mSeqNums.reserve(entries.size()); - mSubEntryNums.reserve(entries.size()); - mInflationDests.reserve(entries.size()); - mInflationDestInds.reserve(entries.size()); - mFlags.reserve(entries.size()); - mHomeDomains.reserve(entries.size()); - mThresholds.reserve(entries.size()); - mSigners.reserve(entries.size()); - mSignerInds.reserve(entries.size()); - mLastModifieds.reserve(entries.size()); - mExtensions.reserve(entries.size()); - mExtensionInds.reserve(entries.size()); - mLedgerExtensions.reserve(entries.size()); - - for (auto const& e : entries) - { - releaseAssert(e.entryExists()); - releaseAssert(e.entry().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - auto const& le = e.entry().ledgerEntry(); - releaseAssert(le.data.type() == ACCOUNT); - auto const& account = le.data.account(); - mAccountIDs.emplace_back(KeyUtils::toStrKey(account.accountID)); - mBalances.emplace_back(account.balance); - mSeqNums.emplace_back(account.seqNum); - 
mSubEntryNums.emplace_back(unsignedToSigned(account.numSubEntries)); - - if (account.inflationDest) - { - mInflationDests.emplace_back( - KeyUtils::toStrKey(*account.inflationDest)); - mInflationDestInds.emplace_back(soci::i_ok); - } - else - { - mInflationDests.emplace_back(""); - mInflationDestInds.emplace_back(soci::i_null); - } - mFlags.emplace_back(unsignedToSigned(account.flags)); - mHomeDomains.emplace_back(decoder::encode_b64(account.homeDomain)); - mThresholds.emplace_back(decoder::encode_b64(account.thresholds)); - if (account.signers.empty()) - { - mSigners.emplace_back(""); - mSignerInds.emplace_back(soci::i_null); - } - else - { - mSigners.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(account.signers))); - mSignerInds.emplace_back(soci::i_ok); - } - mLastModifieds.emplace_back( - unsignedToSigned(le.lastModifiedLedgerSeq)); - - if (account.ext.v() >= 1) - { - mExtensions.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(account.ext))); - mExtensionInds.emplace_back(soci::i_ok); - } - else - { - mExtensions.emplace_back(""); - mExtensionInds.emplace_back(soci::i_null); - } - - mLedgerExtensions.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(le.ext))); - } - } - - void - doSociGenericOperation() - { - std::string sql = - "INSERT INTO accounts ( " - "accountid, balance, seqnum, numsubentries, inflationdest," - "homedomain, thresholds, signers, flags, lastmodified, " - "extension, ledgerext " - ") VALUES ( " - ":id, :v1, :v2, :v3, :v4, :v5, :v6, :v7, :v8, :v9, :v10, :v11 " - ") ON CONFLICT (accountid) DO UPDATE SET " - "balance = excluded.balance, " - "seqnum = excluded.seqnum, " - "numsubentries = excluded.numsubentries, " - "inflationdest = excluded.inflationdest, " - "homedomain = excluded.homedomain, " - "thresholds = excluded.thresholds, " - "signers = excluded.signers, " - "flags = excluded.flags, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = 
mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.exchange(soci::use(mBalances)); - st.exchange(soci::use(mSeqNums)); - st.exchange(soci::use(mSubEntryNums)); - st.exchange(soci::use(mInflationDests, mInflationDestInds)); - st.exchange(soci::use(mHomeDomains)); - st.exchange(soci::use(mThresholds)); - st.exchange(soci::use(mSigners, mSignerInds)); - st.exchange(soci::use(mFlags)); - st.exchange(soci::use(mLastModifieds)); - st.exchange(soci::use(mExtensions, mExtensionInds)); - st.exchange(soci::use(mLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs, strBalances, strSeqNums, strSubEntryNums, - strInflationDests, strFlags, strHomeDomains, strThresholds, - strSigners, strLastModifieds, strExtensions, strLedgerExtensions; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strBalances, mBalances); - marshalToPGArray(conn, strSeqNums, mSeqNums); - marshalToPGArray(conn, strSubEntryNums, mSubEntryNums); - marshalToPGArray(conn, strInflationDests, mInflationDests, - &mInflationDestInds); - marshalToPGArray(conn, strFlags, mFlags); - marshalToPGArray(conn, strHomeDomains, mHomeDomains); - marshalToPGArray(conn, strThresholds, mThresholds); - marshalToPGArray(conn, strSigners, mSigners, &mSignerInds); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - marshalToPGArray(conn, strExtensions, mExtensions, &mExtensionInds); - marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions); - - 
std::string sql = "WITH r AS (SELECT " - "unnest(:ids::TEXT[]), " - "unnest(:v1::BIGINT[]), " - "unnest(:v2::BIGINT[]), " - "unnest(:v3::INT[]), " - "unnest(:v4::TEXT[]), " - "unnest(:v5::TEXT[]), " - "unnest(:v6::TEXT[]), " - "unnest(:v7::TEXT[]), " - "unnest(:v8::INT[]), " - "unnest(:v9::INT[]), " - "unnest(:v10::TEXT[]), " - "unnest(:v11::TEXT[]) " - ")" - "INSERT INTO accounts ( " - "accountid, balance, seqnum, " - "numsubentries, inflationdest, homedomain, " - "thresholds, signers, " - "flags, lastmodified, extension, " - "ledgerext " - ") SELECT * FROM r " - "ON CONFLICT (accountid) DO UPDATE SET " - "balance = excluded.balance, " - "seqnum = excluded.seqnum, " - "numsubentries = excluded.numsubentries, " - "inflationdest = excluded.inflationdest, " - "homedomain = excluded.homedomain, " - "thresholds = excluded.thresholds, " - "signers = excluded.signers, " - "flags = excluded.flags, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strBalances)); - st.exchange(soci::use(strSeqNums)); - st.exchange(soci::use(strSubEntryNums)); - st.exchange(soci::use(strInflationDests)); - st.exchange(soci::use(strHomeDomains)); - st.exchange(soci::use(strThresholds)); - st.exchange(soci::use(strSigners)); - st.exchange(soci::use(strFlags)); - st.exchange(soci::use(strLastModifieds)); - st.exchange(soci::use(strExtensions)); - st.exchange(soci::use(strLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -class BulkDeleteAccountsOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - LedgerTxnConsistency mCons; - std::vector 
mAccountIDs; - - public: - BulkDeleteAccountsOperation(Database& DB, LedgerTxnConsistency cons, - std::vector const& entries) - : mDB(DB), mCons(cons) - { - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - releaseAssert(e.key().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - releaseAssert(e.key().ledgerKey().type() == ACCOUNT); - auto const& account = e.key().ledgerKey().account(); - mAccountIDs.emplace_back(KeyUtils::toStrKey(account.accountID)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM accounts WHERE accountid = :id"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - PGconn* conn = pg->conn_; - std::string strAccountIDs; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - std::string sql = - "WITH r AS (SELECT unnest(:ids::TEXT[])) " - "DELETE FROM accounts WHERE accountid IN (SELECT * FROM r)"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertAccounts( - std::vector const& entries) -{ - ZoneScoped; - 
ZoneValue(static_cast(entries.size())); - BulkUpsertAccountsOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::bulkDeleteAccounts( - std::vector const& entries, LedgerTxnConsistency cons) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkDeleteAccountsOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropAccounts(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS accounts;"; - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS signers;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - - mApp.getDatabase().getSession() - << "CREATE TABLE accounts" - << "(" - << "accountid VARCHAR(56) " << coll << " PRIMARY KEY," - << "balance BIGINT NOT NULL CHECK (balance >= 0)," - "buyingliabilities BIGINT CHECK (buyingliabilities >= 0)," - "sellingliabilities BIGINT CHECK (sellingliabilities >= 0)," - "seqnum BIGINT NOT NULL," - "numsubentries INT NOT NULL CHECK (numsubentries " - ">= 0)," - "inflationdest VARCHAR(56)," - "homedomain VARCHAR(44) NOT NULL," - "thresholds TEXT NOT NULL," - "flags INT NOT NULL," - "signers TEXT," - "lastmodified INT NOT NULL," - "extension TEXT," - "ledgerext TEXT NOT NULL" - ");"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE accounts " - << "ALTER COLUMN accountid " - << "TYPE VARCHAR(56) COLLATE \"C\""; - } - } -} - -class BulkLoadAccountsOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mAccountIDs; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string accountID, inflationDest, homeDomain, thresholds, signers; - int64_t balance; - uint64_t seqNum; - uint32_t numSubEntries, flags, lastModified; - std::string extension; - 
soci::indicator inflationDestInd, signersInd, extensionInd; - std::string ledgerExtension; - soci::indicator ledgerExtInd; - - st.exchange(soci::into(accountID)); - st.exchange(soci::into(balance)); - st.exchange(soci::into(seqNum)); - st.exchange(soci::into(numSubEntries)); - st.exchange(soci::into(inflationDest, inflationDestInd)); - st.exchange(soci::into(homeDomain)); - st.exchange(soci::into(thresholds)); - st.exchange(soci::into(flags)); - st.exchange(soci::into(lastModified)); - st.exchange(soci::into(extension, extensionInd)); - st.exchange(soci::into(signers, signersInd)); - st.exchange(soci::into(ledgerExtension, ledgerExtInd)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("account"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - le.data.type(ACCOUNT); - auto& ae = le.data.account(); - - ae.accountID = KeyUtils::fromStrKey(accountID); - ae.balance = balance; - ae.seqNum = seqNum; - ae.numSubEntries = numSubEntries; - - if (inflationDestInd == soci::i_ok) - { - ae.inflationDest.activate() = - KeyUtils::fromStrKey(inflationDest); - } - - decoder::decode_b64(homeDomain, ae.homeDomain); - - bn::decode_b64(thresholds.begin(), thresholds.end(), - ae.thresholds.begin()); - - if (inflationDestInd == soci::i_ok) - { - ae.inflationDest.activate() = - KeyUtils::fromStrKey(inflationDest); - } - - ae.flags = flags; - le.lastModifiedLedgerSeq = lastModified; - - decodeOpaqueXDR(extension, extensionInd, ae.ext); - - if (signersInd == soci::i_ok) - { - std::vector signersOpaque; - decoder::decode_b64(signers, signersOpaque); - xdr::xdr_from_opaque(signersOpaque, ae.signers); - releaseAssert(std::adjacent_find( - ae.signers.begin(), ae.signers.end(), - [](Signer const& lhs, Signer const& rhs) { - return !(lhs.key < rhs.key); - }) == ae.signers.end()); - } - - decodeOpaqueXDR(ledgerExtension, ledgerExtInd, le.ext); - - st.fetch(); - } - return res; - } - - public: - 
BulkLoadAccountsOperation(Database& db, UnorderedSet const& keys) - : mDb(db) - { - mAccountIDs.reserve(keys.size()); - for (auto const& k : keys) - { - releaseAssert(k.type() == ACCOUNT); - mAccountIDs.emplace_back(KeyUtils::toStrKey(k.account().accountID)); - } - } - - virtual std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector accountIDcstrs; - accountIDcstrs.reserve(mAccountIDs.size()); - for (auto const& acc : mAccountIDs) - { - accountIDcstrs.emplace_back(acc.c_str()); - } - - std::string sql = - "SELECT accountid, balance, seqnum, numsubentries, " - "inflationdest, homedomain, thresholds, flags, lastmodified, " - "extension, signers, ledgerext" - " FROM accounts " - "WHERE accountid IN carray(?, ?, 'char*')"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, accountIDcstrs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(accountIDcstrs.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - virtual std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs; - marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs); - - std::string sql = - "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT accountid, balance, seqnum, numsubentries, " - "inflationdest, homedomain, thresholds, flags, lastmodified, " - "extension, signers, ledgerext" - " FROM accounts " - "WHERE accountid IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadAccounts(UnorderedSet 
const& keys) const -{ - ZoneScoped; - ZoneValue(static_cast(keys.size())); - if (!keys.empty()) - { - BulkLoadAccountsOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} -} diff --git a/src/ledger/LedgerTxnClaimableBalanceSQL.cpp b/src/ledger/LedgerTxnClaimableBalanceSQL.cpp deleted file mode 100644 index 365efe95bf..0000000000 --- a/src/ledger/LedgerTxnClaimableBalanceSQL.cpp +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2020 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -std::shared_ptr -LedgerTxnRoot::Impl::loadClaimableBalance(LedgerKey const& key) const -{ - auto balanceID = toOpaqueBase64(key.claimableBalance().balanceID); - - std::string claimableBalanceEntryStr; - LedgerEntry le; - - std::string sql = "SELECT ledgerentry " - "FROM claimablebalance " - "WHERE balanceid= :balanceid"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(claimableBalanceEntryStr)); - st.exchange(soci::use(balanceID)); - st.define_and_bind(); - st.execute(true); - if (!st.got_data()) - { - return nullptr; - } - - fromOpaqueBase64(le, claimableBalanceEntryStr); - releaseAssert(le.data.type() == CLAIMABLE_BALANCE); - - return std::make_shared(std::move(le)); -} - -class BulkLoadClaimableBalanceOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mBalanceIDs; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string balanceIdStr, claimableBalanceEntryStr; - - st.exchange(soci::into(balanceIdStr)); - 
st.exchange(soci::into(claimableBalanceEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("claimablebalance"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, claimableBalanceEntryStr); - releaseAssert(le.data.type() == CLAIMABLE_BALANCE); - - st.fetch(); - } - return res; - } - - public: - BulkLoadClaimableBalanceOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mBalanceIDs.reserve(keys.size()); - for (auto const& k : keys) - { - releaseAssert(k.type() == CLAIMABLE_BALANCE); - mBalanceIDs.emplace_back( - toOpaqueBase64(k.claimableBalance().balanceID)); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cstrBalanceIDs; - cstrBalanceIDs.reserve(mBalanceIDs.size()); - for (size_t i = 0; i < mBalanceIDs.size(); ++i) - { - cstrBalanceIDs.emplace_back(mBalanceIDs[i].c_str()); - } - - std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'char*')) " - "SELECT balanceid, ledgerentry " - "FROM claimablebalance " - "WHERE balanceid IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, cstrBalanceIDs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cstrBalanceIDs.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs; - marshalToPGArray(pg->conn_, strBalanceIDs, mBalanceIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT balanceid, ledgerentry " - "FROM claimablebalance " - "WHERE balanceid IN 
(SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadClaimableBalance( - UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - BulkLoadClaimableBalanceOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteClaimableBalanceOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mBalanceIDs; - - public: - BulkDeleteClaimableBalanceOperation( - Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mBalanceIDs.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - releaseAssert(e.key().ledgerKey().type() == CLAIMABLE_BALANCE); - mBalanceIDs.emplace_back(toOpaqueBase64( - e.key().ledgerKey().claimableBalance().balanceID)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM claimablebalance WHERE balanceid = :id"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mBalanceIDs)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs; - marshalToPGArray(pg->conn_, strBalanceIDs, mBalanceIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) 
" - "DELETE FROM claimablebalance " - "WHERE balanceid IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteClaimableBalance( - std::vector const& entries, LedgerTxnConsistency cons) -{ - BulkDeleteClaimableBalanceOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertClaimableBalanceOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mBalanceIDs; - std::vector mClaimableBalanceEntrys; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - releaseAssert(entry.data.type() == CLAIMABLE_BALANCE); - mBalanceIDs.emplace_back( - toOpaqueBase64(entry.data.claimableBalance().balanceID)); - mClaimableBalanceEntrys.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertClaimableBalanceOperation( - Database& Db, std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO claimablebalance " - "(balanceid, ledgerentry, lastmodified) " - "VALUES " - "( :id, :v1, :v2 ) " - "ON CONFLICT (balanceid) DO UPDATE SET " - "balanceid = excluded.balanceid, ledgerentry = " - "excluded.ledgerentry, lastmodified = " - "excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - 
st.exchange(soci::use(mBalanceIDs)); - st.exchange(soci::use(mClaimableBalanceEntrys)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs, strClaimableBalanceEntry, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strBalanceIDs, mBalanceIDs); - marshalToPGArray(conn, strClaimableBalanceEntry, - mClaimableBalanceEntrys); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[]))" - "INSERT INTO claimablebalance " - "(balanceid, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (balanceid) DO UPDATE SET " - "balanceid = excluded.balanceid, ledgerentry = " - "excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - st.exchange(soci::use(strClaimableBalanceEntry)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertClaimableBalance( - std::vector const& entries) -{ - BulkUpsertClaimableBalanceOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void 
-LedgerTxnRoot::Impl::dropClaimableBalances(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS claimablebalance;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE claimablebalance (" - << "balanceid VARCHAR(48) " << coll << " PRIMARY KEY, " - << "ledgerentry TEXT NOT NULL, " - << "lastmodified INT NOT NULL);"; - } -} -} diff --git a/src/ledger/LedgerTxnConfigSettingSQL.cpp b/src/ledger/LedgerTxnConfigSettingSQL.cpp deleted file mode 100644 index dbfea378a7..0000000000 --- a/src/ledger/LedgerTxnConfigSettingSQL.cpp +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2022 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotConfigSetting(LedgerEntryType type) -{ - if (type != CONFIG_SETTING) - { - throw NonSociRelatedException("LedgerEntry is not a CONFIG_SETTING"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadConfigSetting(LedgerKey const& key) const -{ - int32_t configSettingID = key.configSetting().configSettingID; - std::string configSettingEntryStr; - - std::string sql = "SELECT ledgerentry " - "FROM configsettings " - "WHERE configsettingid = :configsettingid"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(configSettingEntryStr)); - st.exchange(soci::use(configSettingID)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("configsetting"); - st.execute(true); - } - if 
(!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, configSettingEntryStr); - throwIfNotConfigSetting(le.data.type()); - - return std::make_shared(std::move(le)); -} - -class bulkLoadConfigSettingsOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mConfigSettingIDs; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string configSettingEntryStr; - - st.exchange(soci::into(configSettingEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("configsetting"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, configSettingEntryStr); - throwIfNotConfigSetting(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - bulkLoadConfigSettingsOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mConfigSettingIDs.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotConfigSetting(k.type()); - mConfigSettingIDs.emplace_back(k.configSetting().configSettingID); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'int32')) " - "SELECT ledgerentry " - "FROM configsettings " - "WHERE configsettingid IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, (void*)mConfigSettingIDs.data(), "carray", - 0); - sqlite3_bind_int(st, 2, static_cast(mConfigSettingIDs.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string 
strConfigSettingIDs; - marshalToPGArray(pg->conn_, strConfigSettingIDs, mConfigSettingIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::INT[])) " - "SELECT ledgerentry " - "FROM configsettings " - "WHERE configsettingid IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strConfigSettingIDs)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadConfigSettings( - UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - bulkLoadConfigSettingsOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class bulkUpsertConfigSettingsOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mConfigSettingIDs; - std::vector mConfigSettingEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotConfigSetting(entry.data.type()); - - mConfigSettingIDs.emplace_back( - entry.data.configSetting().configSettingID()); - mConfigSettingEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - bulkUpsertConfigSettingsOperation( - Database& Db, std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO configsettings " - "(configsettingid, ledgerentry, lastmodified) " - "VALUES " - "( :id, :v1, :v2 ) " - "ON CONFLICT (configsettingid) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mConfigSettingIDs)); - 
st.exchange(soci::use(mConfigSettingEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("configsetting"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mConfigSettingIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strConfigSettingIDs, strConfigSettingEntries, - strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strConfigSettingIDs, mConfigSettingIDs); - marshalToPGArray(conn, strConfigSettingEntries, mConfigSettingEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:ids::INT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[])) " - "INSERT INTO configsettings " - "(configsettingid, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (configsettingid) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strConfigSettingIDs)); - st.exchange(soci::use(strConfigSettingEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("configsetting"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mConfigSettingIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertConfigSettings( - std::vector const& entries) -{ - bulkUpsertConfigSettingsOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropConfigSettings(bool rebuild) 
-{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS configsettings;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE configsettings (" - << "configsettingid INT PRIMARY KEY, " - << "ledgerentry TEXT " << coll << " NOT NULL, " - << "lastmodified INT NOT NULL);"; - } -} -} \ No newline at end of file diff --git a/src/ledger/LedgerTxnContractCodeSQL.cpp b/src/ledger/LedgerTxnContractCodeSQL.cpp deleted file mode 100644 index ee0aecb7c6..0000000000 --- a/src/ledger/LedgerTxnContractCodeSQL.cpp +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2022 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotContractCode(LedgerEntryType type) -{ - if (type != CONTRACT_CODE) - { - throw NonSociRelatedException("LedgerEntry is not a CONTRACT_CODE"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadContractCode(LedgerKey const& k) const -{ - auto hash = toOpaqueBase64(k.contractCode().hash); - std::string contractCodeEntryStr; - - std::string sql = "SELECT ledgerentry " - "FROM contractcode " - "WHERE hash = :hash"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(contractCodeEntryStr)); - st.exchange(soci::use(hash)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("contractcode"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, 
contractCodeEntryStr); - throwIfNotContractCode(le.data.type()); - - return std::make_shared(std::move(le)); -} - -class BulkLoadContractCodeOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mHashes; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string contractCodeEntryStr; - - st.exchange(soci::into(contractCodeEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("contractcode"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, contractCodeEntryStr); - throwIfNotContractCode(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - BulkLoadContractCodeOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mHashes.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotContractCode(k.type()); - mHashes.emplace_back(toOpaqueBase64(k.contractCode().hash)); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cStrHashes; - cStrHashes.reserve(mHashes.size()); - for (auto const& h : mHashes) - { - cStrHashes.emplace_back(h.c_str()); - } - std::string sql = "SELECT ledgerentry " - "FROM contractcode " - "WHERE hash IN carray(?, ?, 'char*')"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, (void*)cStrHashes.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cStrHashes.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes; - marshalToPGArray(pg->conn_, strHashes, 
mHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT ledgerentry " - "FROM contractcode " - "WHERE (hash) IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strHashes)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadContractCode( - UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - BulkLoadContractCodeOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteContractCodeOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mHashes; - - public: - BulkDeleteContractCodeOperation(Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mHashes.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - throwIfNotContractCode(e.key().ledgerKey().type()); - mHashes.emplace_back( - toOpaqueBase64(e.key().ledgerKey().contractCode().hash)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM contractcode WHERE hash = :id"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes; - marshalToPGArray(pg->conn_, strHashes, mHashes); - - 
std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM contractcode " - "WHERE hash IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteContractCode( - std::vector const& entries, LedgerTxnConsistency cons) -{ - BulkDeleteContractCodeOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertContractCodeOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mHashes; - std::vector mContractCodeEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotContractCode(entry.data.type()); - - mHashes.emplace_back(toOpaqueBase64(entry.data.contractCode().hash)); - mContractCodeEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertContractCodeOperation(Database& Db, - std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO contractCode " - "(hash, ledgerentry, lastmodified) " - "VALUES " - "( :hash, :v1, :v2 ) " - "ON CONFLICT (hash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mHashes)); - st.exchange(soci::use(mContractCodeEntries)); - 
st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes, strContractCodeEntries, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strHashes, mHashes); - marshalToPGArray(conn, strContractCodeEntries, mContractCodeEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:v1::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "INSERT INTO contractcode " - "(hash, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (hash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strHashes)); - st.exchange(soci::use(strContractCodeEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertContractCode( - std::vector const& entries) -{ - BulkUpsertContractCodeOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropContractCode(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - - 
mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS contractcode;"; - - if (rebuild) - { - mApp.getDatabase().getSession() - << "CREATE TABLE contractcode (" - << "hash TEXT " << coll << " NOT NULL, " - << "ledgerentry TEXT " << coll << " NOT NULL, " - << "lastmodified INT NOT NULL, " - << "PRIMARY KEY (hash));"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE contractcode " - << "ALTER COLUMN hash " - << "TYPE TEXT COLLATE \"C\";"; - } - } -} - -} diff --git a/src/ledger/LedgerTxnContractDataSQL.cpp b/src/ledger/LedgerTxnContractDataSQL.cpp deleted file mode 100644 index 1c71f67cb7..0000000000 --- a/src/ledger/LedgerTxnContractDataSQL.cpp +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright 2022 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotContractData(LedgerEntryType type) -{ - if (type != CONTRACT_DATA) - { - throw NonSociRelatedException("LedgerEntry is not a CONTRACT_DATA"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadContractData(LedgerKey const& k) const -{ - auto contractID = toOpaqueBase64(k.contractData().contract); - auto key = toOpaqueBase64(k.contractData().key); - int32_t type = k.contractData().durability; - std::string contractDataEntryStr; - - std::string sql = - "SELECT ledgerentry " - "FROM contractdata " - "WHERE contractID = :contractID AND key = :key AND type = :type"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(contractDataEntryStr)); - st.exchange(soci::use(contractID)); - st.exchange(soci::use(key)); - 
st.exchange(soci::use(type)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("contractdata"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, contractDataEntryStr); - throwIfNotContractData(le.data.type()); - - return std::make_shared(std::move(le)); -} - -class BulkLoadContractDataOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mContractIDs; - std::vector mKeys; - std::vector mTypes; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string contractDataEntryStr; - - st.exchange(soci::into(contractDataEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("contractdata"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, contractDataEntryStr); - throwIfNotContractData(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - BulkLoadContractDataOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mContractIDs.reserve(keys.size()); - mKeys.reserve(keys.size()); - mTypes.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotContractData(k.type()); - mContractIDs.emplace_back( - toOpaqueBase64(k.contractData().contract)); - mKeys.emplace_back(toOpaqueBase64(k.contractData().key)); - mTypes.emplace_back(k.contractData().durability); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cStrContractIDs, cStrKeys; - cStrContractIDs.reserve(mContractIDs.size()); - cStrKeys.reserve(cStrKeys.size()); - for (auto const& cid : mContractIDs) - { - cStrContractIDs.emplace_back(cid.c_str()); - } - for (auto const& key : mKeys) - { - cStrKeys.emplace_back(key.c_str()); - } - - std::string sqlJoin = "SELECT x.value, y.value, z.value " - "FROM " - "(SELECT rowid, value FROM carray(?, ?, 'char*') " - "ORDER BY rowid) AS x " 
- "INNER JOIN " - "(SELECT rowid, value FROM carray(?, ?, 'char*') " - "ORDER BY rowid) AS y " - "ON x.rowid = y.rowid " - "INNER JOIN " - "(SELECT rowid, value FROM carray(?, ?, 'int32') " - "ORDER BY rowid) AS z " - "ON x.rowid = z.rowid"; - - std::string sql = "WITH r AS (" + sqlJoin + - ") " - "SELECT ledgerentry " - "FROM contractdata " - "WHERE (contractid, key, type) IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, (void*)cStrContractIDs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(mContractIDs.size())); - sqlite3_bind_pointer(st, 3, (void*)cStrKeys.data(), "carray", 0); - sqlite3_bind_int(st, 4, static_cast(mKeys.size())); - sqlite3_bind_pointer(st, 5, (void*)mTypes.data(), "carray", 0); - sqlite3_bind_int(st, 6, static_cast(mTypes.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, strTypes; - marshalToPGArray(pg->conn_, strContractIDs, mContractIDs); - marshalToPGArray(pg->conn_, strKeys, mKeys); - marshalToPGArray(pg->conn_, strTypes, mTypes); - - std::string sql = "WITH r AS (SELECT unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "SELECT ledgerentry " - "FROM contractdata " - "WHERE (contractid, key, type) IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - st.exchange(soci::use(strTypes)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadContractData( - UnorderedSet const& 
keys) const -{ - if (!keys.empty()) - { - BulkLoadContractDataOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteContractDataOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mContractIDs; - std::vector mKeys; - std::vector mTypes; - - public: - BulkDeleteContractDataOperation(Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mContractIDs.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - throwIfNotContractData(e.key().ledgerKey().type()); - mContractIDs.emplace_back( - toOpaqueBase64(e.key().ledgerKey().contractData().contract)); - mKeys.emplace_back( - toOpaqueBase64(e.key().ledgerKey().contractData().key)); - mTypes.emplace_back(e.key().ledgerKey().contractData().durability); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM contractdata WHERE contractid = :id " - "AND key = :key AND type = :type"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mContractIDs)); - st.exchange(soci::use(mKeys)); - st.exchange(soci::use(mTypes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mContractIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, strTypes; - marshalToPGArray(pg->conn_, strContractIDs, mContractIDs); - marshalToPGArray(pg->conn_, strKeys, 
mKeys); - marshalToPGArray(pg->conn_, strTypes, mTypes); - - std::string sql = "WITH r AS (SELECT unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "DELETE FROM contractdata " - "WHERE (contractid, key, type) IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - st.exchange(soci::use(strTypes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mContractIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteContractData( - std::vector const& entries, LedgerTxnConsistency cons) -{ - BulkDeleteContractDataOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertContractDataOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mContractIDs; - std::vector mKeys; - std::vector mTypes; - std::vector mContractDataEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotContractData(entry.data.type()); - - mContractIDs.emplace_back( - toOpaqueBase64(entry.data.contractData().contract)); - mKeys.emplace_back(toOpaqueBase64(entry.data.contractData().key)); - mTypes.emplace_back(entry.data.contractData().durability); - mContractDataEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertContractDataOperation(Database& Db, - std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO 
contractData " - "(contractid, key, type, ledgerentry, lastmodified) " - "VALUES " - "( :id, :key, :type, :v1, :v2 ) " - "ON CONFLICT (contractid, key, type) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mContractIDs)); - st.exchange(soci::use(mKeys)); - st.exchange(soci::use(mTypes)); - st.exchange(soci::use(mContractDataEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mContractIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, strTypes, strContractDataEntries, - strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strContractIDs, mContractIDs); - marshalToPGArray(conn, strKeys, mKeys); - marshalToPGArray(conn, strTypes, mTypes); - marshalToPGArray(conn, strContractDataEntries, mContractDataEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = - "WITH r AS " - "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[]), unnest(:v3::TEXT[]), unnest(:v4::INT[])) " - "INSERT INTO contractdata " - "(contractid, key, type, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (contractid,key,type) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - 
st.exchange(soci::use(strTypes)); - st.exchange(soci::use(strContractDataEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mContractIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertContractData( - std::vector const& entries) -{ - BulkUpsertContractDataOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropContractData(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS contractdata;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE contractdata (" - << "contractid TEXT " << coll << " NOT NULL, " - << "key TEXT " << coll << " NOT NULL, " - << "type INT NOT NULL, " - << "ledgerentry TEXT " << coll << " NOT NULL, " - << "lastmodified INT NOT NULL, " - << "PRIMARY KEY (contractid, key, type));"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE contractdata " - << "ALTER COLUMN contractid " - << "TYPE TEXT COLLATE \"C\"," - << "ALTER COLUMN key " - << "TYPE TEXT COLLATE \"C\"," - << "ALTER COLUMN type " - << "TYPE INT;"; - } - } -} - -} \ No newline at end of file diff --git a/src/ledger/LedgerTxnDataSQL.cpp b/src/ledger/LedgerTxnDataSQL.cpp deleted file mode 100644 index 10f6ba7be5..0000000000 --- a/src/ledger/LedgerTxnDataSQL.cpp +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright 2018 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "crypto/KeyUtils.h" -#include "crypto/SecretKey.h" -#include "database/Database.h" -#include "database/DatabaseTypeSpecificOperation.h" -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "main/Application.h" -#include "util/Decoder.h" -#include "util/GlobalChecks.h" -#include "util/Logging.h" -#include "util/types.h" -#include - -namespace stellar -{ - -std::shared_ptr -LedgerTxnRoot::Impl::loadData(LedgerKey const& key) const -{ - ZoneScoped; - std::string actIDStrKey = KeyUtils::toStrKey(key.data().accountID); - std::string dataName = decoder::encode_b64(key.data().dataName); - - std::string dataValue; - soci::indicator dataValueIndicator; - std::string extensionStr; - soci::indicator extensionInd; - std::string ledgerExtStr; - soci::indicator ledgerExtInd; - - LedgerEntry le; - le.data.type(DATA); - DataEntry& de = le.data.data(); - - std::string sql = "SELECT datavalue, lastmodified, extension, " - "ledgerext " - "FROM accountdata " - "WHERE accountid= :id AND dataname= :dataname"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(dataValue, dataValueIndicator)); - st.exchange(soci::into(le.lastModifiedLedgerSeq)); - st.exchange(soci::into(extensionStr, extensionInd)); - st.exchange(soci::into(ledgerExtStr, ledgerExtInd)); - st.exchange(soci::use(actIDStrKey)); - st.exchange(soci::use(dataName)); - st.define_and_bind(); - st.execute(true); - if (!st.got_data()) - { - return nullptr; - } - - de.accountID = key.data().accountID; - de.dataName = key.data().dataName; - - if (dataValueIndicator != soci::i_ok) - { - throw std::runtime_error("bad database state"); - } - decoder::decode_b64(dataValue, de.dataValue); - - decodeOpaqueXDR(extensionStr, extensionInd, de.ext); - - decodeOpaqueXDR(ledgerExtStr, ledgerExtInd, le.ext); - - return 
std::make_shared(std::move(le)); -} - -class BulkUpsertDataOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - std::vector mAccountIDs; - std::vector mDataNames; - std::vector mDataValues; - std::vector mLastModifieds; - std::vector mExtensions; - std::vector mLedgerExtensions; - - void - accumulateEntry(LedgerEntry const& entry) - { - releaseAssert(entry.data.type() == DATA); - DataEntry const& data = entry.data.data(); - mAccountIDs.emplace_back(KeyUtils::toStrKey(data.accountID)); - mDataNames.emplace_back(decoder::encode_b64(data.dataName)); - mDataValues.emplace_back(decoder::encode_b64(data.dataValue)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - mExtensions.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(data.ext))); - mLedgerExtensions.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(entry.ext))); - } - - public: - BulkUpsertDataOperation(Database& DB, - std::vector const& entries) - : mDB(DB) - { - for (auto const& e : entries) - { - accumulateEntry(e); - } - } - - BulkUpsertDataOperation(Database& DB, - std::vector const& entryIter) - : mDB(DB) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - releaseAssert(e.entry().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = - "INSERT INTO accountdata ( " - "accountid, dataname, datavalue, lastmodified, extension, " - "ledgerext " - ") VALUES ( " - ":id, :v1, :v2, :v3, :v4, :v5 " - ") ON CONFLICT (accountid, dataname) DO UPDATE SET " - "datavalue = excluded.datavalue, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.exchange(soci::use(mDataNames)); - st.exchange(soci::use(mDataValues)); - 
st.exchange(soci::use(mLastModifieds)); - st.exchange(soci::use(mExtensions)); - st.exchange(soci::use(mLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs, strDataNames, strDataValues, - strLastModifieds, strExtensions, strLedgerExtensions; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strDataNames, mDataNames); - marshalToPGArray(conn, strDataValues, mDataValues); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - marshalToPGArray(conn, strExtensions, mExtensions); - marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions); - std::string sql = - "WITH r AS (SELECT " - "unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), " - "unnest(:v3::INT[]), " - "unnest(:v4::TEXT[]), " - "unnest(:v5::TEXT[]) " - ")" - "INSERT INTO accountdata ( " - "accountid, dataname, datavalue, lastmodified, extension, " - "ledgerext " - ") SELECT * FROM r " - "ON CONFLICT (accountid, dataname) DO UPDATE SET " - "datavalue = excluded.datavalue, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - st.exchange(soci::use(strDataValues)); - st.exchange(soci::use(strLastModifieds)); - st.exchange(soci::use(strExtensions)); - st.exchange(soci::use(strLedgerExtensions)); - st.define_and_bind(); - { - auto timer = 
mDB.getUpsertTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -class BulkDeleteDataOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - LedgerTxnConsistency mCons; - std::vector mAccountIDs; - std::vector mDataNames; - - public: - BulkDeleteDataOperation(Database& DB, LedgerTxnConsistency cons, - std::vector const& entries) - : mDB(DB), mCons(cons) - { - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - releaseAssert(e.key().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - releaseAssert(e.key().ledgerKey().type() == DATA); - auto const& data = e.key().ledgerKey().data(); - mAccountIDs.emplace_back(KeyUtils::toStrKey(data.accountID)); - mDataNames.emplace_back(decoder::encode_b64(data.dataName)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM accountdata WHERE accountid = :id AND " - " dataname = :v1 "; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.exchange(soci::use(mDataNames)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs; - std::string strDataNames; - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strDataNames, mDataNames); - std::string sql = - "WITH r AS ( SELECT " - "unnest(:ids::TEXT[])," - "unnest(:v1::TEXT[])" - " ) " - 
"DELETE FROM accountdata WHERE (accountid, dataname) IN " - "(SELECT * FROM r)"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertAccountData( - std::vector const& entries) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkUpsertDataOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::bulkDeleteAccountData( - std::vector const& entries, LedgerTxnConsistency cons) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkDeleteDataOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropData(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS accountdata;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE accountdata" - << "(" - << "accountid VARCHAR(56) " << coll << " NOT NULL," - << "dataname VARCHAR(88) " << coll << " NOT NULL," - << "datavalue VARCHAR(112) NOT NULL," - "lastmodified INT NOT NULL," - "extension TEXT," - "ledgerext TEXT NOT NULL," - "PRIMARY KEY (accountid, dataname)" - ");"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() - << "ALTER TABLE accountdata " - << "ALTER COLUMN accountid " - << "TYPE VARCHAR(56) COLLATE \"C\", " - << "ALTER COLUMN dataname " - << "TYPE VARCHAR(88) COLLATE \"C\""; - } - } -} - -class 
BulkLoadDataOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mAccountIDs; - std::vector mDataNames; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string accountID, dataName, dataValue; - uint32_t lastModified; - std::string extension; - soci::indicator extensionInd; - std::string ledgerExtension; - soci::indicator ledgerExtInd; - - st.exchange(soci::into(accountID)); - st.exchange(soci::into(dataName)); - st.exchange(soci::into(dataValue)); - st.exchange(soci::into(lastModified)); - st.exchange(soci::into(extension, extensionInd)); - st.exchange(soci::into(ledgerExtension, ledgerExtInd)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("data"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - le.data.type(DATA); - auto& de = le.data.data(); - - de.accountID = KeyUtils::fromStrKey(accountID); - decoder::decode_b64(dataName, de.dataName); - decoder::decode_b64(dataValue, de.dataValue); - le.lastModifiedLedgerSeq = lastModified; - - decodeOpaqueXDR(extension, extensionInd, de.ext); - - decodeOpaqueXDR(ledgerExtension, ledgerExtInd, le.ext); - - st.fetch(); - } - return res; - } - - public: - BulkLoadDataOperation(Database& db, UnorderedSet const& keys) - : mDb(db) - { - mAccountIDs.reserve(keys.size()); - mDataNames.reserve(keys.size()); - for (auto const& k : keys) - { - releaseAssert(k.type() == DATA); - mAccountIDs.emplace_back(KeyUtils::toStrKey(k.data().accountID)); - mDataNames.emplace_back(decoder::encode_b64(k.data().dataName)); - } - } - - virtual std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - releaseAssert(mAccountIDs.size() == mDataNames.size()); - - std::vector cstrAccountIDs; - std::vector cstrDataNames; - cstrAccountIDs.reserve(mAccountIDs.size()); - cstrDataNames.reserve(mDataNames.size()); - for (size_t i = 0; i < mAccountIDs.size(); ++i) - { - 
cstrAccountIDs.emplace_back(mAccountIDs[i].c_str()); - cstrDataNames.emplace_back(mDataNames[i].c_str()); - } - - std::string sqlJoin = - "SELECT x.value, y.value FROM " - "(SELECT rowid, value FROM carray(?, ?, 'char*') ORDER BY rowid) " - "AS x " - "INNER JOIN (SELECT rowid, value FROM carray(?, ?, 'char*') ORDER " - "BY rowid) AS y ON x.rowid = y.rowid"; - std::string sql = "WITH r AS (" + sqlJoin + - ") SELECT accountid, dataname, datavalue, " - "lastmodified, extension, " - "ledgerext " - "FROM accountdata WHERE (accountid, dataname) IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, cstrAccountIDs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cstrAccountIDs.size())); - sqlite3_bind_pointer(st, 3, cstrDataNames.data(), "carray", 0); - sqlite3_bind_int(st, 4, static_cast(cstrDataNames.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - releaseAssert(mAccountIDs.size() == mDataNames.size()); - - std::string strAccountIDs; - std::string strDataNames; - marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs); - marshalToPGArray(pg->conn_, strDataNames, mDataNames); - - std::string sql = - "WITH r AS (SELECT unnest(:v1::TEXT[]), unnest(:v2::TEXT[])) " - "SELECT accountid, dataname, datavalue, lastmodified, extension, " - "ledgerext " - "FROM accountdata WHERE (accountid, dataname) IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> 
-LedgerTxnRoot::Impl::bulkLoadData(UnorderedSet const& keys) const -{ - ZoneScoped; - ZoneValue(static_cast(keys.size())); - if (!keys.empty()) - { - BulkLoadDataOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} -} diff --git a/src/ledger/LedgerTxnImpl.h b/src/ledger/LedgerTxnImpl.h index 82241b74ff..d9433abdc9 100644 --- a/src/ledger/LedgerTxnImpl.h +++ b/src/ledger/LedgerTxnImpl.h @@ -629,8 +629,6 @@ class LedgerTxnRoot::Impl void throwIfChild() const; - std::shared_ptr loadAccount(LedgerKey const& key) const; - std::shared_ptr loadData(LedgerKey const& key) const; std::shared_ptr loadOffer(LedgerKey const& key) const; std::vector loadAllOffers() const; std::deque::const_iterator @@ -646,57 +644,12 @@ class LedgerTxnRoot::Impl loadOffersByAccountAndAsset(AccountID const& accountID, Asset const& asset) const; std::vector loadOffers(StatementContext& prep) const; - std::vector loadInflationWinners(size_t maxWinners, - int64_t minBalance) const; - std::shared_ptr - loadTrustLine(LedgerKey const& key) const; - std::vector - loadPoolShareTrustLinesByAccountAndAsset(AccountID const& accountID, - Asset const& asset) const; - std::shared_ptr - loadClaimableBalance(LedgerKey const& key) const; - std::shared_ptr - loadLiquidityPool(LedgerKey const& key) const; - std::shared_ptr - loadContractData(LedgerKey const& key) const; - std::shared_ptr - loadContractCode(LedgerKey const& key) const; - std::shared_ptr - loadConfigSetting(LedgerKey const& key) const; - std::shared_ptr loadTTL(LedgerKey const& key) const; void bulkApply(BulkLedgerEntryChangeAccumulator& bleca, size_t bufferThreshold, LedgerTxnConsistency cons); - void bulkUpsertAccounts(std::vector const& entries); - void bulkDeleteAccounts(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertTrustLines(std::vector const& entries); - void bulkDeleteTrustLines(std::vector const& 
entries, - LedgerTxnConsistency cons); void bulkUpsertOffers(std::vector const& entries); void bulkDeleteOffers(std::vector const& entries, LedgerTxnConsistency cons); - void bulkUpsertAccountData(std::vector const& entries); - void bulkDeleteAccountData(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertClaimableBalance(std::vector const& entries); - void bulkDeleteClaimableBalance(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertLiquidityPool(std::vector const& entries); - void bulkDeleteLiquidityPool(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertContractData(std::vector const& entries); - void bulkDeleteContractData(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertContractCode(std::vector const& entries); - void bulkDeleteContractCode(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertConfigSettings(std::vector const& entries); - void bulkUpsertTTL(std::vector const& entries); - void bulkDeleteTTL(std::vector const& entries, - LedgerTxnConsistency cons); - - static std::string tableFromLedgerEntryType(LedgerEntryType let); // The entry cache maintains relatively strong invariants: // @@ -720,27 +673,8 @@ class LedgerTxnRoot::Impl BestOffersEntryPtr getFromBestOffers(Asset const& buying, Asset const& selling) const; - UnorderedMap> - bulkLoadAccounts(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadTrustLines(UnorderedSet const& keys) const; UnorderedMap> bulkLoadOffers(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadData(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadClaimableBalance(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadLiquidityPool(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadContractData(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadContractCode(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadConfigSettings(UnorderedSet const& keys) const; - 
UnorderedMap> - bulkLoadTTL(UnorderedSet const& keys) const; - std::deque::const_iterator loadNextBestOffersIntoCache(BestOffersEntryPtr cached, Asset const& buying, Asset const& selling); @@ -778,18 +712,8 @@ class LedgerTxnRoot::Impl // deleteOffersModifiedOnOrAfterLedger has no exception safety guarantees. void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const; - // dropAccounts, dropData, dropOffers, and dropTrustLines have no exception - // safety guarantees. - void dropAccounts(bool rebuild); - void dropData(bool rebuild); - void dropOffers(bool rebuild); - void dropTrustLines(bool rebuild); - void dropClaimableBalances(bool rebuild); - void dropLiquidityPools(bool rebuild); - void dropContractData(bool rebuild); - void dropContractCode(bool rebuild); - void dropConfigSettings(bool rebuild); - void dropTTL(bool rebuild); + // no exception safety guarantees. + void dropOffers(); #ifdef BUILD_TESTS void resetForFuzzer(); diff --git a/src/ledger/LedgerTxnLiquidityPoolSQL.cpp b/src/ledger/LedgerTxnLiquidityPoolSQL.cpp deleted file mode 100644 index ae87665c5b..0000000000 --- a/src/ledger/LedgerTxnLiquidityPoolSQL.cpp +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2020 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotLiquidityPool(LedgerEntryType type) -{ - if (type != LIQUIDITY_POOL) - { - throw NonSociRelatedException("LedgerEntry is not a LIQUIDITY_POOL"); - } -} - -static std::string -getPrimaryKey(PoolID const& poolID) -{ - TrustLineAsset tla(ASSET_TYPE_POOL_SHARE); - tla.liquidityPoolID() = poolID; - return toOpaqueBase64(tla); -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadLiquidityPool(LedgerKey const& key) const -{ - auto poolAsset = getPrimaryKey(key.liquidityPool().liquidityPoolID); - - std::string liquidityPoolEntryStr; - - std::string sql = "SELECT ledgerentry " - "FROM liquiditypool " - "WHERE poolasset= :poolasset"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(liquidityPoolEntryStr)); - st.exchange(soci::use(poolAsset)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("liquiditypool"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, liquidityPoolEntryStr); - throwIfNotLiquidityPool(le.data.type()); - - return std::make_shared(std::move(le)); -} - -class BulkLoadLiquidityPoolOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mPoolAssets; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string liquidityPoolEntryStr; - - st.exchange(soci::into(liquidityPoolEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("liquiditypool"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, 
liquidityPoolEntryStr); - throwIfNotLiquidityPool(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - BulkLoadLiquidityPoolOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mPoolAssets.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotLiquidityPool(k.type()); - mPoolAssets.emplace_back( - getPrimaryKey(k.liquidityPool().liquidityPoolID)); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cstrPoolAssets; - cstrPoolAssets.reserve(mPoolAssets.size()); - for (size_t i = 0; i < mPoolAssets.size(); ++i) - { - cstrPoolAssets.emplace_back(mPoolAssets[i].c_str()); - } - - std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'char*')) " - "SELECT ledgerentry " - "FROM liquiditypool " - "WHERE poolasset IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, cstrPoolAssets.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cstrPoolAssets.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets; - marshalToPGArray(pg->conn_, strPoolAssets, mPoolAssets); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT ledgerentry " - "FROM liquiditypool " - "WHERE poolasset IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadLiquidityPool( - UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - 
BulkLoadLiquidityPoolOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteLiquidityPoolOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mPoolAssets; - - public: - BulkDeleteLiquidityPoolOperation(Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mPoolAssets.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - throwIfNotLiquidityPool(e.key().ledgerKey().type()); - mPoolAssets.emplace_back(getPrimaryKey( - e.key().ledgerKey().liquidityPool().liquidityPoolID)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM liquiditypool WHERE poolasset = :id"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mPoolAssets)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets; - marshalToPGArray(pg->conn_, strPoolAssets, mPoolAssets); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM liquiditypool " - "WHERE poolasset IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) 
!= mPoolAssets.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteLiquidityPool( - std::vector const& entries, LedgerTxnConsistency cons) -{ - BulkDeleteLiquidityPoolOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertLiquidityPoolOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mPoolAssets; - std::vector mAssetAs; - std::vector mAssetBs; - std::vector mLiquidityPoolEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotLiquidityPool(entry.data.type()); - - auto const& lp = entry.data.liquidityPool(); - auto const& cp = lp.body.constantProduct(); - mPoolAssets.emplace_back(getPrimaryKey(lp.liquidityPoolID)); - mAssetAs.emplace_back(toOpaqueBase64(cp.params.assetA)); - mAssetBs.emplace_back(toOpaqueBase64(cp.params.assetB)); - mLiquidityPoolEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertLiquidityPoolOperation( - Database& Db, std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = - "INSERT INTO liquiditypool " - "(poolasset, asseta, assetb, ledgerentry, lastmodified) " - "VALUES " - "( :id, :v1, :v2, :v3, :v4 ) " - "ON CONFLICT (poolasset) DO UPDATE SET " - "asseta = excluded.asseta, " - "assetb = excluded.assetb, " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mPoolAssets)); - st.exchange(soci::use(mAssetAs)); - st.exchange(soci::use(mAssetBs)); - 
st.exchange(soci::use(mLiquidityPoolEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets, strAssetAs, strAssetBs, - strLiquidityPoolEntry, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strPoolAssets, mPoolAssets); - marshalToPGArray(conn, strAssetAs, mAssetAs); - marshalToPGArray(conn, strAssetBs, mAssetBs); - marshalToPGArray(conn, strLiquidityPoolEntry, mLiquidityPoolEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = - "WITH r AS " - "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), unnest(:v3::TEXT[]), " - "unnest(:v4::INT[])) " - "INSERT INTO liquiditypool " - "(poolasset, asseta, assetb, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (poolasset) DO UPDATE SET " - "asseta = excluded.asseta, " - "assetb = excluded.assetb, " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - st.exchange(soci::use(strAssetAs)); - st.exchange(soci::use(strAssetBs)); - st.exchange(soci::use(strLiquidityPoolEntry)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void 
-LedgerTxnRoot::Impl::bulkUpsertLiquidityPool( - std::vector const& entries) -{ - BulkUpsertLiquidityPoolOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropLiquidityPools(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS liquiditypool;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - // The primary key is poolasset (the base-64 opaque TrustLineAsset - // containing the PoolID) instead of poolid (the base-64 opaque PoolID) - // so that we can perform the join in load pool share trust lines by - // account and asset. - mApp.getDatabase().getSession() - << "CREATE TABLE liquiditypool (" - << "poolasset TEXT " << coll << " PRIMARY KEY, " - << "asseta TEXT " << coll << " NOT NULL, " - << "assetb TEXT " << coll << " NOT NULL, " - << "ledgerentry TEXT NOT NULL, " - << "lastmodified INT NOT NULL);"; - mApp.getDatabase().getSession() << "CREATE INDEX liquiditypoolasseta " - << "ON liquiditypool(asseta);"; - mApp.getDatabase().getSession() << "CREATE INDEX liquiditypoolassetb " - << "ON liquiditypool(assetb);"; - } -} -} diff --git a/src/ledger/LedgerTxnOfferSQL.cpp b/src/ledger/LedgerTxnOfferSQL.cpp index c70e86cb6e..4cf1b23bc2 100644 --- a/src/ledger/LedgerTxnOfferSQL.cpp +++ b/src/ledger/LedgerTxnOfferSQL.cpp @@ -652,7 +652,7 @@ LedgerTxnRoot::Impl::bulkDeleteOffers(std::vector const& entries, } void -LedgerTxnRoot::Impl::dropOffers(bool rebuild) +LedgerTxnRoot::Impl::dropOffers() { throwIfChild(); mEntryCache.clear(); @@ -660,44 +660,39 @@ LedgerTxnRoot::Impl::dropOffers(bool rebuild) mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS offers;"; - if (rebuild) + std::string coll = mApp.getDatabase().getSimpleCollationClause(); + mApp.getDatabase().getSession() + << "CREATE TABLE offers" + << "(" + << "sellerid VARCHAR(56) " << coll << "NOT NULL," 
+ << "offerid BIGINT NOT NULL CHECK (offerid >= " + "0)," + << "sellingasset TEXT " << coll << " NOT NULL," + << "buyingasset TEXT " << coll << " NOT NULL," + << "amount BIGINT NOT NULL CHECK (amount >= 0)," + "pricen INT NOT NULL," + "priced INT NOT NULL," + "price DOUBLE PRECISION NOT NULL," + "flags INT NOT NULL," + "lastmodified INT NOT NULL," + "extension TEXT NOT NULL," + "ledgerext TEXT NOT NULL," + "PRIMARY KEY (offerid)" + ");"; + mApp.getDatabase().getSession() + << "CREATE INDEX bestofferindex ON offers " + "(sellingasset,buyingasset,price,offerid);"; + mApp.getDatabase().getSession() << "CREATE INDEX offerbyseller ON offers " + "(sellerid);"; + if (!mApp.getDatabase().isSqlite()) { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE offers" - << "(" - << "sellerid VARCHAR(56) " << coll << "NOT NULL," - << "offerid BIGINT NOT NULL CHECK (offerid >= " - "0)," - << "sellingasset TEXT " << coll << " NOT NULL," - << "buyingasset TEXT " << coll << " NOT NULL," - << "amount BIGINT NOT NULL CHECK (amount >= 0)," - "pricen INT NOT NULL," - "priced INT NOT NULL," - "price DOUBLE PRECISION NOT NULL," - "flags INT NOT NULL," - "lastmodified INT NOT NULL," - "extension TEXT NOT NULL," - "ledgerext TEXT NOT NULL," - "PRIMARY KEY (offerid)" - ");"; - mApp.getDatabase().getSession() - << "CREATE INDEX bestofferindex ON offers " - "(sellingasset,buyingasset,price,offerid);"; - mApp.getDatabase().getSession() - << "CREATE INDEX offerbyseller ON offers " - "(sellerid);"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() - << "ALTER TABLE offers " - << "ALTER COLUMN sellerid " - << "TYPE VARCHAR(56) COLLATE \"C\", " - << "ALTER COLUMN buyingasset " - << "TYPE TEXT COLLATE \"C\", " - << "ALTER COLUMN sellingasset " - << "TYPE TEXT COLLATE \"C\""; - } + mApp.getDatabase().getSession() << "ALTER TABLE offers " + << "ALTER COLUMN sellerid " + << "TYPE VARCHAR(56) COLLATE \"C\", " + 
<< "ALTER COLUMN buyingasset " + << "TYPE TEXT COLLATE \"C\", " + << "ALTER COLUMN sellingasset " + << "TYPE TEXT COLLATE \"C\""; } } diff --git a/src/ledger/LedgerTxnTTLSQL.cpp b/src/ledger/LedgerTxnTTLSQL.cpp deleted file mode 100644 index 1773bca835..0000000000 --- a/src/ledger/LedgerTxnTTLSQL.cpp +++ /dev/null @@ -1,382 +0,0 @@ - -// Copyright 2023 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotTTL(LedgerEntryType type) -{ - if (type != TTL) - { - throw NonSociRelatedException("LedgerEntry is not TTL"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadTTL(LedgerKey const& key) const -{ - auto keyHash = toOpaqueBase64(key.ttl().keyHash); - std::string ttlEntryStr; - - std::string sql = "SELECT ledgerentry " - "FROM ttl " - "WHERE keyhash = :keyHash"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(ttlEntryStr)); - st.exchange(soci::use(keyHash)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("ttl"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, ttlEntryStr); - throwIfNotTTL(le.data.type()); - - return std::make_shared(std::move(le)); -} -class BulkLoadTTLOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mKeyHashes; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string ttlEntryStr; - - st.exchange(soci::into(ttlEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("ttl"); - st.execute(true); - } - - std::vector 
res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, ttlEntryStr); - throwIfNotTTL(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - BulkLoadTTLOperation(Database& db, UnorderedSet const& keys) - : mDb(db) - { - mKeyHashes.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotTTL(k.type()); - mKeyHashes.emplace_back(toOpaqueBase64(k.ttl().keyHash)); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cStrKeyHashes; - cStrKeyHashes.reserve(mKeyHashes.size()); - for (auto const& h : mKeyHashes) - { - cStrKeyHashes.emplace_back(h.c_str()); - } - std::string sql = "SELECT ledgerentry " - "FROM ttl " - "WHERE keyhash IN carray(?, ?, 'char*')"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, (void*)cStrKeyHashes.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cStrKeyHashes.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strKeyHashes; - marshalToPGArray(pg->conn_, strKeyHashes, mKeyHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT ledgerentry " - "FROM ttl " - "WHERE (keyHash) IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strKeyHashes)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadTTL(UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - BulkLoadTTLOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - 
keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteTTLOperation : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mKeyHashes; - - public: - BulkDeleteTTLOperation(Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mKeyHashes.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssertOrThrow(!e.entryExists()); - throwIfNotTTL(e.key().ledgerKey().type()); - mKeyHashes.emplace_back( - toOpaqueBase64(e.key().ledgerKey().ttl().keyHash)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM ttl WHERE keyhash = :id"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mKeyHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("ttl"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mKeyHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strKeyHashes; - marshalToPGArray(pg->conn_, strKeyHashes, mKeyHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM ttl " - "WHERE keyHash IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strKeyHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("ttl"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mKeyHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void 
-LedgerTxnRoot::Impl::bulkDeleteTTL(std::vector const& entries, - LedgerTxnConsistency cons) -{ - BulkDeleteTTLOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertTTLOperation : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mKeyHashes; - std::vector mTTLEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotTTL(entry.data.type()); - - mKeyHashes.emplace_back(toOpaqueBase64(entry.data.ttl().keyHash)); - mTTLEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertTTLOperation(Database& Db, - std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO ttl " - "(keyhash, ledgerentry, lastmodified) " - "VALUES " - "( :keyHash, :v1, :v2 ) " - "ON CONFLICT (keyhash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mKeyHashes)); - st.exchange(soci::use(mTTLEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("ttl"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mKeyHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strKeyHashes, strTTLEntries, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strKeyHashes, 
mKeyHashes); - marshalToPGArray(conn, strTTLEntries, mTTLEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), unnest(:v3::INT[])) " - "INSERT INTO ttl " - "(keyHash, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (keyhash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strKeyHashes)); - st.exchange(soci::use(strTTLEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("ttl"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mKeyHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertTTL(std::vector const& entries) -{ - BulkUpsertTTLOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropTTL(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS ttl;"; - - if (rebuild) - { - mApp.getDatabase().getSession() - << "CREATE TABLE ttl (" - << "keyhash TEXT " << coll << " NOT NULL, " - << "ledgerentry TEXT " << coll << " NOT NULL, " - << "lastmodified INT NOT NULL, " - << "PRIMARY KEY (keyhash));"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE ttl " - << "ALTER COLUMN keyhash " - << "TYPE TEXT COLLATE \"C\";"; - } - } -} - -} \ No newline at end of file diff --git a/src/ledger/LedgerTxnTrustLineSQL.cpp b/src/ledger/LedgerTxnTrustLineSQL.cpp deleted file mode 100644 index 5481bc5185..0000000000 --- a/src/ledger/LedgerTxnTrustLineSQL.cpp +++ 
/dev/null @@ -1,522 +0,0 @@ -// Copyright 2017 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "crypto/KeyUtils.h" -#include "crypto/SecretKey.h" -#include "database/Database.h" -#include "database/DatabaseTypeSpecificOperation.h" -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/Logging.h" -#include "util/XDROperators.h" -#include "util/types.h" -#include - -namespace stellar -{ - -void -validateTrustLineKey(uint32_t ledgerVersion, LedgerKey const& key) -{ - auto const& asset = key.trustLine().asset; - - if (!isAssetValid(asset, ledgerVersion)) - { - throw NonSociRelatedException("TrustLine asset is invalid"); - } - else if (asset.type() == ASSET_TYPE_NATIVE) - { - throw NonSociRelatedException("XLM TrustLine?"); - } - else if (isIssuer(key.trustLine().accountID, asset)) - { - throw NonSociRelatedException("TrustLine accountID is issuer"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadTrustLine(LedgerKey const& key) const -{ - ZoneScoped; - - validateTrustLineKey(mHeader->ledgerVersion, key); - - std::string accountIDStr = KeyUtils::toStrKey(key.trustLine().accountID); - auto asset = toOpaqueBase64(key.trustLine().asset); - - std::string trustLineEntryStr; - - auto prep = mApp.getDatabase().getPreparedStatement( - "SELECT ledgerentry " - " FROM trustlines " - "WHERE accountid= :id AND asset= :asset"); - auto& st = prep.statement(); - st.exchange(soci::into(trustLineEntryStr)); - st.exchange(soci::use(accountIDStr)); - st.exchange(soci::use(asset)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("trust"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, 
trustLineEntryStr); - if (le.data.type() != TRUSTLINE) - { - throw NonSociRelatedException("Loaded non-trustline entry"); - } - - return std::make_shared(std::move(le)); -} - -std::vector -LedgerTxnRoot::Impl::loadPoolShareTrustLinesByAccountAndAsset( - AccountID const& accountID, Asset const& asset) const -{ - ZoneScoped; - - std::string accountIDStr = KeyUtils::toStrKey(accountID); - auto assetStr = toOpaqueBase64(asset); - - std::string trustLineEntryStr; - - auto prep = mApp.getDatabase().getPreparedStatement( - "SELECT trustlines.ledgerentry " - "FROM trustlines " - "INNER JOIN liquiditypool " - "ON trustlines.asset = liquiditypool.poolasset " - "AND trustlines.accountid = :v1 " - "AND (liquiditypool.asseta = :v2 OR liquiditypool.assetb = :v3)"); - auto& st = prep.statement(); - st.exchange(soci::into(trustLineEntryStr)); - st.exchange(soci::use(accountIDStr)); - st.exchange(soci::use(assetStr)); - st.exchange(soci::use(assetStr)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("trust"); - st.execute(true); - } - - std::vector trustLines; - while (st.got_data()) - { - trustLines.emplace_back(); - fromOpaqueBase64(trustLines.back(), trustLineEntryStr); - if (trustLines.back().data.type() != TRUSTLINE) - { - throw NonSociRelatedException("Loaded non-trustline entry"); - } - st.fetch(); - } - return trustLines; -} - -class BulkUpsertTrustLinesOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - std::vector mAccountIDs; - std::vector mAssets; - std::vector mTrustLineEntries; - std::vector mLastModifieds; - - public: - BulkUpsertTrustLinesOperation(Database& DB, - std::vector const& entries, - uint32_t ledgerVersion) - : mDB(DB) - { - mAccountIDs.reserve(entries.size()); - mAssets.reserve(entries.size()); - mTrustLineEntries.reserve(entries.size()); - mLastModifieds.reserve(entries.size()); - - for (auto const& e : entries) - { - releaseAssert(e.entryExists()); - releaseAssert(e.entry().type() == - 
InternalLedgerEntryType::LEDGER_ENTRY); - auto const& le = e.entry().ledgerEntry(); - releaseAssert(le.data.type() == TRUSTLINE); - - auto const& tl = le.data.trustLine(); - - validateTrustLineKey(ledgerVersion, e.key().ledgerKey()); - - mAccountIDs.emplace_back(KeyUtils::toStrKey(tl.accountID)); - mAssets.emplace_back(toOpaqueBase64(tl.asset)); - mTrustLineEntries.emplace_back(toOpaqueBase64(le)); - mLastModifieds.emplace_back( - unsignedToSigned(le.lastModifiedLedgerSeq)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO trustlines ( " - "accountid, asset, ledgerentry, lastmodified)" - "VALUES ( " - ":id, :v1, :v2, :v3 " - ") ON CONFLICT (accountid, asset) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.exchange(soci::use(mAssets)); - st.exchange(soci::use(mTrustLineEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("trustline"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - PGconn* conn = pg->conn_; - - std::string strAccountIDs, strAssets, strTrustLineEntries, - strLastModifieds; - - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strAssets, mAssets); - marshalToPGArray(conn, strTrustLineEntries, mTrustLineEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS (SELECT " - "unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), " - "unnest(:v3::INT[])) " - 
"INSERT INTO trustlines ( " - "accountid, asset, ledgerEntry, lastmodified" - ") SELECT * from r " - "ON CONFLICT (accountid, asset) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strAssets)); - st.exchange(soci::use(strTrustLineEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("trustline"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -class BulkDeleteTrustLinesOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - LedgerTxnConsistency mCons; - std::vector mAccountIDs; - std::vector mAssets; - - public: - BulkDeleteTrustLinesOperation(Database& DB, LedgerTxnConsistency cons, - std::vector const& entries, - uint32_t ledgerVersion) - : mDB(DB), mCons(cons) - { - mAccountIDs.reserve(entries.size()); - mAssets.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - releaseAssert(e.key().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - releaseAssert(e.key().ledgerKey().type() == TRUSTLINE); - auto const& tl = e.key().ledgerKey().trustLine(); - - validateTrustLineKey(ledgerVersion, e.key().ledgerKey()); - - mAccountIDs.emplace_back(KeyUtils::toStrKey(tl.accountID)); - mAssets.emplace_back(toOpaqueBase64(tl.asset)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM trustlines WHERE accountid = :id " - "AND asset = :v1"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.exchange(soci::use(mAssets)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("trustline"); - st.execute(true); - } - if 
(static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs, strAssets; - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strAssets, mAssets); - std::string sql = "WITH r AS (SELECT " - "unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[])" - ") " - "DELETE FROM trustlines WHERE " - "(accountid, asset) IN (SELECT * FROM r)"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strAssets)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("trustline"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertTrustLines( - std::vector const& entries) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkUpsertTrustLinesOperation op(mApp.getDatabase(), entries, - mHeader->ledgerVersion); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::bulkDeleteTrustLines( - std::vector const& entries, LedgerTxnConsistency cons) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkDeleteTrustLinesOperation op(mApp.getDatabase(), cons, entries, - mHeader->ledgerVersion); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropTrustLines(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE 
IF EXISTS trustlines;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE trustlines" - << "(" - << "accountid VARCHAR(56) " << coll << " NOT NULL," - << "asset TEXT " << coll << " NOT NULL," - << "ledgerentry TEXT NOT NULL," - << "lastmodified INT NOT NULL," - << "PRIMARY KEY (accountid, asset));"; - } -} - -class BulkLoadTrustLinesOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mAccountIDs; - std::vector mAssets; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string accountID, asset, trustLineEntryStr; - - st.exchange(soci::into(accountID)); - st.exchange(soci::into(asset)); - st.exchange(soci::into(trustLineEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("trust"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, trustLineEntryStr); - releaseAssert(le.data.type() == TRUSTLINE); - releaseAssert(le.data.trustLine().asset.type() != - ASSET_TYPE_NATIVE); - - st.fetch(); - } - return res; - } - - public: - BulkLoadTrustLinesOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mAccountIDs.reserve(keys.size()); - mAssets.reserve(keys.size()); - - for (auto const& k : keys) - { - releaseAssert(k.type() == TRUSTLINE); - if (k.trustLine().asset.type() == ASSET_TYPE_NATIVE) - { - throw NonSociRelatedException( - "TrustLine asset can't be native"); - } - - mAccountIDs.emplace_back( - KeyUtils::toStrKey(k.trustLine().accountID)); - mAssets.emplace_back(toOpaqueBase64(k.trustLine().asset)); - } - } - - virtual std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - releaseAssert(mAccountIDs.size() == mAssets.size()); - - std::vector cstrAccountIDs; - std::vector cstrAssets; - cstrAccountIDs.reserve(mAccountIDs.size()); - cstrAssets.reserve(mAssets.size()); - 
for (size_t i = 0; i < mAccountIDs.size(); ++i) - { - cstrAccountIDs.emplace_back(mAccountIDs[i].c_str()); - cstrAssets.emplace_back(mAssets[i].c_str()); - } - - std::string sqlJoin = "SELECT x.value, y.value FROM " - "(SELECT rowid, value FROM carray(?, ?, " - "'char*') ORDER BY rowid) " - "AS x " - "INNER JOIN (SELECT rowid, value FROM " - "carray(?, ?, 'char*') ORDER " - "BY rowid) AS y ON x.rowid = y.rowid "; - std::string sql = "WITH r AS (" + sqlJoin + - ") SELECT accountid, asset, ledgerentry " - "FROM trustlines WHERE (accountid, asset) IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - releaseAssertOrThrow(sqliteStatement); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, cstrAccountIDs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cstrAccountIDs.size())); - sqlite3_bind_pointer(st, 3, cstrAssets.data(), "carray", 0); - sqlite3_bind_int(st, 4, static_cast(cstrAssets.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - virtual std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - releaseAssert(mAccountIDs.size() == mAssets.size()); - - std::string strAccountIDs; - std::string strAssets; - marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs); - marshalToPGArray(pg->conn_, strAssets, mAssets); - - auto prep = mDb.getPreparedStatement( - "WITH r AS (SELECT unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[])) SELECT accountid, asset, " - "ledgerentry " - " FROM trustlines " - "WHERE (accountid, asset) IN (SELECT * " - "FROM r)"); - auto& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strAssets)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadTrustLines( - UnorderedSet const& keys) const -{ 
- ZoneScoped; - ZoneValue(static_cast(keys.size())); - if (!keys.empty()) - { - BulkLoadTrustLinesOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} -} diff --git a/src/ledger/test/InMemoryLedgerTxn.cpp b/src/ledger/test/InMemoryLedgerTxn.cpp index e213ace9ba..beeab8d270 100644 --- a/src/ledger/test/InMemoryLedgerTxn.cpp +++ b/src/ledger/test/InMemoryLedgerTxn.cpp @@ -360,9 +360,9 @@ InMemoryLedgerTxn::getPoolShareTrustLinesByAccountAndAsset( } void -InMemoryLedgerTxn::dropOffers(bool rebuild) +InMemoryLedgerTxn::dropOffers() { - mRealRootForOffers.dropOffers(rebuild); + mRealRootForOffers.dropOffers(); } uint64_t diff --git a/src/ledger/test/InMemoryLedgerTxn.h b/src/ledger/test/InMemoryLedgerTxn.h index 6a14d217fa..7e2f3d9ee7 100644 --- a/src/ledger/test/InMemoryLedgerTxn.h +++ b/src/ledger/test/InMemoryLedgerTxn.h @@ -133,7 +133,7 @@ class InMemoryLedgerTxn : public LedgerTxn getBestOffer(Asset const& buying, Asset const& selling, OfferDescriptor const& worseThan) override; - void dropOffers(bool rebuild) override; + void dropOffers() override; uint64_t countOffers(LedgerRange const& ledgers) const override; void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; diff --git a/src/ledger/test/InMemoryLedgerTxnRoot.cpp b/src/ledger/test/InMemoryLedgerTxnRoot.cpp index 3d37ba9ae7..4ff9ca435f 100644 --- a/src/ledger/test/InMemoryLedgerTxnRoot.cpp +++ b/src/ledger/test/InMemoryLedgerTxnRoot.cpp @@ -109,52 +109,7 @@ InMemoryLedgerTxnRoot::deleteOffersModifiedOnOrAfterLedger( } void -InMemoryLedgerTxnRoot::dropAccounts(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropData(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropOffers(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropTrustLines(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropClaimableBalances(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropLiquidityPools(bool) -{ -} - -void 
-InMemoryLedgerTxnRoot::dropContractData(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropContractCode(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropConfigSettings(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropTTL(bool) +InMemoryLedgerTxnRoot::dropOffers() { } diff --git a/src/ledger/test/InMemoryLedgerTxnRoot.h b/src/ledger/test/InMemoryLedgerTxnRoot.h index 1fb4fd4617..9b925172aa 100644 --- a/src/ledger/test/InMemoryLedgerTxnRoot.h +++ b/src/ledger/test/InMemoryLedgerTxnRoot.h @@ -68,16 +68,7 @@ class InMemoryLedgerTxnRoot : public AbstractLedgerTxnParent void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; - void dropAccounts(bool rebuild) override; - void dropData(bool rebuild) override; - void dropOffers(bool rebuild) override; - void dropTrustLines(bool rebuild) override; - void dropClaimableBalances(bool rebuild) override; - void dropLiquidityPools(bool rebuild) override; - void dropContractData(bool rebuild) override; - void dropContractCode(bool rebuild) override; - void dropConfigSettings(bool rebuild) override; - void dropTTL(bool rebuild) override; + void dropOffers() override; double getPrefetchHitRate() const override; uint32_t prefetchClassic(UnorderedSet const& keys) override; uint32_t prefetchSoroban(UnorderedSet const& keys, diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp index 409c819030..8c9b2beafe 100644 --- a/src/main/ApplicationImpl.cpp +++ b/src/main/ApplicationImpl.cpp @@ -185,8 +185,7 @@ maybeRebuildLedger(Application& app, bool applyBuckets) app.getDatabase().clearPreparedStatementCache(); soci::transaction tx(app.getDatabase().getSession()); LOG_INFO(DEFAULT_LOG, "Dropping offers"); - app.getLedgerTxnRoot().dropOffers(/*rebuild=*/true); - + app.getLedgerTxnRoot().dropOffers(); tx.commit(); // No transaction is needed. 
ApplyBucketsWork breaks the apply into many @@ -643,12 +642,18 @@ ApplicationImpl::validateAndLogConfig() } auto pageSizeExp = mConfig.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT; + + // If the page size is less than 256 bytes, it is essentially + // indexing individual keys, so page size should be set to 0 + // instead. + static auto const pageSizeMinExponent = 8; + + // Any exponent above 31 will cause overflow + static auto const pageSizeMaxExponent = 31; + if (pageSizeExp != 0) { - // If the page size is less than 256 bytes, it is essentially - // indexing individual keys, so page size should be set to 0 - // instead. - if (pageSizeExp < 8) + if (pageSizeExp < pageSizeMinExponent) { throw std::invalid_argument( "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " @@ -656,8 +661,7 @@ ApplicationImpl::validateAndLogConfig() "indexing"); } - // Check if pageSize will cause overflow - if (pageSizeExp > 31) + if (pageSizeExp > pageSizeMaxExponent) { throw std::invalid_argument( "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp index d3728c5541..0c2a3c717c 100644 --- a/src/main/CommandLine.cpp +++ b/src/main/CommandLine.cpp @@ -1177,9 +1177,8 @@ runNewDB(CommandLineArgs const& args) }; return runWithHelp(args, - { - configurationParser(configOption), - }, + {configurationParser(configOption), + minimalDBParser(minimalForInMemoryMode)}, [&] { auto cfg = configOption.getConfig(); initializeDatabase(cfg); @@ -1486,8 +1485,9 @@ run(CommandLineArgs const& args) args, {configurationParser(configOption), disableBucketGCParser(disableBucketGC), - metadataOutputStreamParser(stream), - waitForConsensusParser(waitForConsensus)}, + metadataOutputStreamParser(stream), inMemoryParser(inMemory), + waitForConsensusParser(waitForConsensus), + startAtLedgerParser(startAtLedger), startAtHashParser(startAtHash)}, [&] { Config cfg; std::shared_ptr clock; diff --git a/src/main/Config.cpp b/src/main/Config.cpp index da8fb98551..9e5ff11363 100644 --- 
a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -1064,7 +1064,7 @@ Config::processConfig(std::shared_ptr t) }}, {"BACKGROUND_OVERLAY_PROCESSING", [&]() { BACKGROUND_OVERLAY_PROCESSING = readBool(item); }}, - // TODO: Flags are no longer supported, remove in next release. + // https://github.com/stellar/stellar-core/issues/4581 {"BACKGROUND_EVICTION_SCAN", [&]() { CLOG_WARNING( @@ -1087,8 +1087,7 @@ Config::processConfig(std::shared_ptr t) "DEPRECATED_SQL_LEDGER_STATE is deprecated and " "ignored. Please remove from config"); }}, - // Still support EXPERIMENTAL_BUCKETLIST_DB* flags for - // captive-core for 21.0 release, remove in 21.1 release + // https://github.com/stellar/stellar-core/issues/4581 {"EXPERIMENTAL_BUCKETLIST_DB", [&]() { CLOG_WARNING( diff --git a/src/main/PersistentState.h b/src/main/PersistentState.h index 7dc359ae2e..06eab873aa 100644 --- a/src/main/PersistentState.h +++ b/src/main/PersistentState.h @@ -28,6 +28,7 @@ class PersistentState kRebuildLedger, kLastSCPDataXDR, kTxSet, + // https://github.com/stellar/stellar-core/issues/4582 kDBBackend, kLastEntry, }; diff --git a/test-tx-meta-baseline-current/InvokeHostFunctionTests.json b/test-tx-meta-baseline-current/InvokeHostFunctionTests.json index 6386579190..6f2d88e86e 100644 --- a/test-tx-meta-baseline-current/InvokeHostFunctionTests.json +++ b/test-tx-meta-baseline-current/InvokeHostFunctionTests.json @@ -1387,9 +1387,7 @@ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], - "temp entry eviction|BucketListDB|background scan" : [ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], - "temp entry eviction|BucketListDB|legacy main thread scan" : [ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], - "temp entry eviction|sql" : [ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], + "temp entry eviction" : [ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], "transaction validation diagnostics" : [ "bKDF6V5IzTo=" ], "version test" : [ "766L+TYsWqA=" ] } diff --git a/test-tx-meta-baseline-next/InvokeHostFunctionTests.json b/test-tx-meta-baseline-next/InvokeHostFunctionTests.json 
index 9fc2508fea..c5dfd2de4e 100644 --- a/test-tx-meta-baseline-next/InvokeHostFunctionTests.json +++ b/test-tx-meta-baseline-next/InvokeHostFunctionTests.json @@ -1391,9 +1391,7 @@ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], - "temp entry eviction|BucketListDB|background scan" : [ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], - "temp entry eviction|BucketListDB|legacy main thread scan" : [ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], - "temp entry eviction|sql" : [ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], + "temp entry eviction" : [ "bKDF6V5IzTo=", "bKDF6V5IzTo=" ], "transaction validation diagnostics" : [ "bKDF6V5IzTo=" ], "version test" : [ "766L+TYsWqA=" ] } From 4fac6e90d5642cd0aa297f50cc0008b7da25aebd Mon Sep 17 00:00:00 2001 From: Garand Tyson Date: Wed, 18 Dec 2024 16:08:44 -0800 Subject: [PATCH 11/17] Change github action files to ubuntu-22.04 --- .github/workflows/build.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cfa7d6b61f..80e15ee0c6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,13 +15,13 @@ jobs: complete: if: always() needs: [fmt, cargo-deny, rust-check-git-rev-deps, build] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') run: exit 1 fmt: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 - run: rustup component add rustfmt @@ -29,7 +29,7 @@ jobs: - run: cargo fmt --all --check cargo-deny: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: matrix: checks: @@ -47,13 +47,13 @@ jobs: arguments: rust-check-git-rev-deps: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 - uses: stellar/actions/rust-check-git-rev-deps@main build: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 env: CACHED_PATHS: | ~/.ccache From 786e6c486272ee1750048ff7bae16414675cb07f Mon Sep 17 00:00:00 2001 From: Dmytro Kozhevin 
Date: Mon, 16 Dec 2024 18:57:25 -0500 Subject: [PATCH 12/17] Some Soroban loadgen/test fixes. - Increase the refundable fee for Wasm uploads - Revert the accidental default instructions change - Add some fixes to the loadgen test to (hopefully) ensure there are no failures - Bump base instructions to 2.5M to account for the increased wasm size. --- Builds/VisualStudio/stellar-core.vcxproj | 8 +- .../VisualStudio/stellar-core.vcxproj.filters | 24 +++--- src/simulation/TxGenerator.cpp | 29 ++++---- src/simulation/test/LoadGeneratorTests.cpp | 74 ++----------------- 4 files changed, 38 insertions(+), 97 deletions(-) diff --git a/Builds/VisualStudio/stellar-core.vcxproj b/Builds/VisualStudio/stellar-core.vcxproj index d7641cdd7e..7f2ecfb9c4 100644 --- a/Builds/VisualStudio/stellar-core.vcxproj +++ b/Builds/VisualStudio/stellar-core.vcxproj @@ -616,8 +616,6 @@ exit /b 0 - - @@ -639,6 +637,8 @@ exit /b 0 + + @@ -1068,8 +1068,6 @@ exit /b 0 - - @@ -1085,6 +1083,8 @@ exit /b 0 + + diff --git a/Builds/VisualStudio/stellar-core.vcxproj.filters b/Builds/VisualStudio/stellar-core.vcxproj.filters index 4ef8433844..bafcf17ed5 100644 --- a/Builds/VisualStudio/stellar-core.vcxproj.filters +++ b/Builds/VisualStudio/stellar-core.vcxproj.filters @@ -900,12 +900,6 @@ ledger - - ledger - - - ledger - ledger @@ -1401,6 +1395,12 @@ bucket + + ledger\tests + + + ledger\tests +
@@ -2096,12 +2096,6 @@ ledger - - ledger - - - ledger - ledger @@ -2449,6 +2443,12 @@ bucket + + ledger\tests + + + ledger\tests + diff --git a/src/simulation/TxGenerator.cpp b/src/simulation/TxGenerator.cpp index f00578628b..ca33ee333d 100644 --- a/src/simulation/TxGenerator.cpp +++ b/src/simulation/TxGenerator.cpp @@ -19,7 +19,7 @@ constexpr uint32_t DEFAULT_WASM_BYTES = 35 * 1024; constexpr uint32_t DEFAULT_NUM_DATA_ENTRIES = 2; constexpr uint32_t DEFAULT_IO_KILOBYTES = 1; constexpr uint32_t DEFAULT_TX_SIZE_BYTES = 256; -constexpr uint64_t DEFAULT_INSTRUCTIONS = 28'000'000; +constexpr uint32_t DEFAULT_INSTRUCTIONS = 28'000'000; // Sample from a discrete distribution of `values` with weights `weights`. // Returns `defaultValue` if `values` is empty. @@ -373,16 +373,16 @@ TxGenerator::invokeSorobanLoadTransaction( // instruction count is not perfect. Some TXs will fail due to exceeding // resource limitations, but failures will be rare and those failures // will happen at apply time, so they will still generate significant load. - uint64_t const baseInstructionCount = 1'500'000; - uint64_t const instructionsPerGuestCycle = 80; - uint64_t const instructionsPerHostCycle = 5030; + uint32_t const baseInstructionCount = 2'500'000; + uint32_t const instructionsPerGuestCycle = 80; + uint32_t const instructionsPerHostCycle = 5030; // Very rough estimates. - uint64_t const instructionsPerKbWritten = 50000; + uint32_t const instructionsPerKbWritten = 50000; // instructionsPerPaddingByte is just a value we know works. We use an auth // payload as padding, so it consumes instructions on the host side. 
- uint64_t const instructionsPerPaddingByte = 100; + uint32_t const instructionsPerPaddingByte = 100; SorobanResources resources; resources.footprint.readOnly = instance.readOnlyKeys; @@ -451,9 +451,10 @@ TxGenerator::invokeSorobanLoadTransaction( instructionsPerPaddingByte * paddingBytes; // Pick random number of cycles between bounds - uint64_t targetInstructions = sampleDiscrete( - appCfg.LOADGEN_INSTRUCTIONS_FOR_TESTING, - appCfg.LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING, 0u); + uint32_t targetInstructions = + sampleDiscrete(appCfg.LOADGEN_INSTRUCTIONS_FOR_TESTING, + appCfg.LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING, + DEFAULT_INSTRUCTIONS); // Factor in instructions for storage targetInstructions = baseInstructionCount + instructionsForStorageAndAuth >= @@ -463,12 +464,12 @@ TxGenerator::invokeSorobanLoadTransaction( instructionsForStorageAndAuth; // Randomly select a number of guest cycles - uint64_t guestCyclesMax = targetInstructions / instructionsPerGuestCycle; - uint64_t guestCycles = rand_uniform(0, guestCyclesMax); + uint32_t guestCyclesMax = targetInstructions / instructionsPerGuestCycle; + uint32_t guestCycles = rand_uniform(0, guestCyclesMax); // Rest of instructions consumed by host cycles targetInstructions -= guestCycles * instructionsPerGuestCycle; - uint64_t hostCycles = targetInstructions / instructionsPerHostCycle; + uint32_t hostCycles = targetInstructions / instructionsPerHostCycle; auto guestCyclesU64 = makeU64(guestCycles); auto hostCyclesU64 = makeU64(hostCycles); @@ -490,7 +491,7 @@ TxGenerator::invokeSorobanLoadTransaction( increaseOpSize(op, paddingBytes); - int64_t instructionCount = + uint32_t instructionCount = baseInstructionCount + hostCycles * instructionsPerHostCycle + guestCycles * instructionsPerGuestCycle + instructionsForStorageAndAuth; resources.instructions = instructionCount; @@ -995,7 +996,7 @@ TxGenerator::sorobanRandomWasmTransaction(uint32_t ledgerNum, int64_t resourceFee = sorobanResourceFee( mApp, resources, 
5000 + static_cast(wasmSize), 100); // Roughly cover the rent fee. - resourceFee += 100000; + resourceFee += 1'000'000; auto tx = sorobanTransactionFrameFromOps(mApp.getNetworkID(), *account, {uploadOp}, {}, resources, inclusionFee, resourceFee); diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp index 35150f06f8..5b04e0867d 100644 --- a/src/simulation/test/LoadGeneratorTests.cpp +++ b/src/simulation/test/LoadGeneratorTests.cpp @@ -475,8 +475,8 @@ TEST_CASE("generate soroban load", "[loadgen][soroban]") cfg.mTxMaxSizeBytes * cfg.mLedgerMaxTxCount; }, simulation); - auto const numInstances = 10; - auto const numSorobanTxs = 100; + auto const numInstances = nAccounts; + auto const numSorobanTxs = 150; numTxsBefore = getSuccessfulTxCount(); @@ -509,8 +509,7 @@ TEST_CASE("generate soroban load", "[loadgen][soroban]") /* txRate */ 1); invokeLoadCfg.getMutSorobanConfig().nInstances = numInstances; - constexpr int maxInvokeFail = 10; - invokeLoadCfg.setMinSorobanPercentSuccess(100 - maxInvokeFail); + invokeLoadCfg.setMinSorobanPercentSuccess(100); loadGen.generateLoad(invokeLoadCfg); completeCount = complete.count(); @@ -525,15 +524,8 @@ TEST_CASE("generate soroban load", "[loadgen][soroban]") {"ledger", "apply-soroban", "success"}); auto& txsFailed = node->getMetrics().NewCounter( {"ledger", "apply-soroban", "failure"}); - - // Because we can't preflight TXs, some invocations will fail due to too - // few resources. This is expected, as our instruction counts are - // approximations. 
The following checks will make sure all set up - // phases succeeded, so only the invoke phase may have acceptable failed - // TXs - REQUIRE(txsSucceeded.count() > - numTxsBefore + numSorobanTxs - maxInvokeFail); - REQUIRE(txsFailed.count() < maxInvokeFail); + REQUIRE(txsSucceeded.count() == numTxsBefore + numSorobanTxs); + REQUIRE(txsFailed.count() == 0); } auto instanceKeys = loadGen.getContractInstanceKeysForTesting(); @@ -595,16 +587,7 @@ TEST_CASE("generate soroban load", "[loadgen][soroban]") constexpr uint32_t uploadWeight = 5; mixCfg.sorobanUploadWeight = uploadWeight; - // Because we can't preflight TXs, some invocations will fail due to too - // few resources. This is expected, as our instruction counts are - // approximations. Additionally, many upload transactions will fail as - // they are likely to generate invalid wasm. Therefore, we check that - // all but `maxInvokeFail + 1.5 * uploadWeight` transactions succeed. In - // case the random sampling produces more upload transactions than - // expected, we allow for a 50% margin of error on the number of upload - // transactions. 
- constexpr int maxSorobanFail = 1.5 * uploadWeight + maxInvokeFail; - mixLoadCfg.setMinSorobanPercentSuccess(100 - maxSorobanFail); + mixLoadCfg.setMinSorobanPercentSuccess(100); loadGen.generateLoad(mixLoadCfg); auto numSuccessBefore = getSuccessfulTxCount(); @@ -620,54 +603,11 @@ TEST_CASE("generate soroban load", "[loadgen][soroban]") // Check results for (auto node : nodes) { - auto& totalSucceeded = - node->getMetrics().NewCounter({"ledger", "apply", "success"}); auto& totalFailed = node->getMetrics().NewCounter({"ledger", "apply", "failure"}); - auto& sorobanSucceeded = node->getMetrics().NewCounter( - {"ledger", "apply-soroban", "success"}); - auto& sorobanFailed = node->getMetrics().NewCounter( - {"ledger", "apply-soroban", "failure"}); - - // Total number of classic transactions - int64_t classicTotal = - totalSucceeded.count() + totalFailed.count() - - sorobanSucceeded.count() - sorobanFailed.count(); - - // All classic transaction should succeed - REQUIRE(totalSucceeded.count() - sorobanSucceeded.count() == - classicTotal); - // All failures should be soroban failures) - REQUIRE(totalFailed.count() == sorobanFailed.count()); - - // Check soroban results - REQUIRE(sorobanSucceeded.count() > numSuccessBefore + numMixedTxs - - classicTotal - - maxSorobanFail); - REQUIRE(sorobanFailed.count() <= maxSorobanFail + numFailedBefore); + REQUIRE(totalFailed.count() == 0); } } - - // Test minimum percent success with too many transactions that fail to - // apply by requiring a 100% success rate for SOROBAN_UPLOAD mode - SECTION("Too many failed transactions") - { - auto uploadFailCfg = GeneratedLoadConfig::txLoad( - LoadGenMode::SOROBAN_UPLOAD, nAccounts, numSorobanTxs, - /* txRate */ 1); - - // Set success percentage to 100% and leave other parameters at default. 
- uploadFailCfg.setMinSorobanPercentSuccess(100); - - // LoadGen should fail - loadGen.generateLoad(uploadFailCfg); - auto& fail = - app.getMetrics().NewMeter({"loadgen", "run", "failed"}, "run"); - auto failCount = fail.count(); - simulation->crankUntil([&]() { return fail.count() == failCount + 1; }, - 300 * Herder::EXP_LEDGER_TIMESPAN_SECONDS, - false); - } } TEST_CASE("Multi-op pretend transactions are valid", "[loadgen]") From 8600f796d5be2994fe733668f71def18f611db5c Mon Sep 17 00:00:00 2001 From: marta-lokhova Date: Mon, 16 Dec 2024 18:47:44 -0800 Subject: [PATCH 13/17] Drop support for cursors --- docs/quick-reference.md | 3 +- docs/software/commands.md | 22 --- docs/stellar-core_example.cfg | 4 - src/bucket/test/BucketManagerTests.cpp | 5 +- src/database/Database.cpp | 7 +- src/herder/test/HerderTests.cpp | 48 ----- src/history/test/HistoryTests.cpp | 8 +- src/main/ApplicationImpl.cpp | 4 - src/main/CommandHandler.cpp | 76 -------- src/main/CommandHandler.h | 3 - src/main/Config.cpp | 13 -- src/main/Config.h | 3 - src/main/ExternalQueue.cpp | 245 ------------------------- src/main/ExternalQueue.h | 47 ----- src/main/Maintainer.cpp | 21 ++- src/main/test/ExternalQueueTests.cpp | 46 ----- 16 files changed, 28 insertions(+), 527 deletions(-) delete mode 100644 src/main/ExternalQueue.cpp delete mode 100644 src/main/ExternalQueue.h delete mode 100644 src/main/test/ExternalQueueTests.cpp diff --git a/docs/quick-reference.md b/docs/quick-reference.md index b52cf8ad4f..bcb0029302 100644 --- a/docs/quick-reference.md +++ b/docs/quick-reference.md @@ -61,8 +61,7 @@ You have some control over which peers you're connected to: ### Maintenance -Core keeps old meta around for Horizon and other systems. As cursors get updated, automatic -maintenance normally deletes more than enough for the node to use a constant amount of disk space. 
+Core keeps historical data needed for publish (such as SCP history) Sometimes you need to clean up more than this (for example, if you have a large maintenance debt). In this case running the command `maintenance?count=100000000` (integer is a large number, bigger than your max backlog) will perform the full maintenance. diff --git a/docs/software/commands.md b/docs/software/commands.md index faec8d2ad3..f1ee33d208 100644 --- a/docs/software/commands.md +++ b/docs/software/commands.md @@ -244,11 +244,6 @@ Most commands return their results in JSON format. `connect?peer=NAME&port=NNN`
Triggers the instance to connect to peer NAME at port NNN. -* **dropcursor** - `dropcursor?id=ID`
- Deletes the tracking cursor identified by `id`. See `setcursor` for - more information. - * **droppeer** `droppeer?node=NODE_ID[&ban=D]`
Drops peer identified by NODE_ID, when D is 1 the peer is also banned. @@ -307,23 +302,6 @@ Most commands return their results in JSON format. * `delayed`: participating in the latest consensus rounds, but slower than others. * `agree`: running just fine. -* **setcursor** - `setcursor?id=ID&cursor=N`
- Sets or creates a cursor identified by `ID` with value `N`. ID is an - uppercase AlphaNum, N is an uint32 that represents the last ledger sequence - number that the instance ID processed. Cursors are used by dependent services - to tell stellar-core which data can be safely deleted by the instance. The - data is historical data stored in the SQL tables such as txhistory or - ledgerheaders. When all consumers processed the data for ledger sequence N - the data can be safely removed by the instance. The actual deletion is - performed by invoking the `maintenance` endpoint or on startup. See also - `dropcursor`. - -* **getcursor** - `getcursor?[id=ID]`
- Gets the cursor identified by `ID`. If ID is not defined then all cursors - will be returned. - * **scp** `scp?[limit=n][&fullkeys=false]`
Returns a JSON object with the internal state of the SCP engine for the last diff --git a/docs/stellar-core_example.cfg b/docs/stellar-core_example.cfg index d98a0f8fe5..fba9e6d437 100644 --- a/docs/stellar-core_example.cfg +++ b/docs/stellar-core_example.cfg @@ -291,10 +291,6 @@ KNOWN_PEERS=[ "core-testnet2.stellar.org", "core-testnet3.stellar.org"] -# KNOWN_CURSORS (list of strings) default is empty -# Set of cursors added at each startup with value '1'. -KNOWN_CURSORS=["HORIZON"] - ####################### ## SCP settings diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index 0a92aa29f3..8eb393c789 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -22,7 +22,7 @@ #include "lib/catch.hpp" #include "main/Application.h" #include "main/Config.h" -#include "main/ExternalQueue.h" +#include "main/Maintainer.h" #include "test/TestUtils.h" #include "test/test.h" #include "util/GlobalChecks.h" @@ -614,8 +614,7 @@ TEST_CASE_VERSIONS( clock.crank(false); // Trim history after publishing whenever possible. 
- ExternalQueue ps(*app); - ps.deleteOldEntries(50000); + app->getMaintainer().performMaintenance(50000); } }); } diff --git a/src/database/Database.cpp b/src/database/Database.cpp index 73cccdb6f6..a60d252936 100644 --- a/src/database/Database.cpp +++ b/src/database/Database.cpp @@ -24,7 +24,6 @@ #include "history/HistoryManager.h" #include "ledger/LedgerHeaderUtils.h" #include "ledger/LedgerTxn.h" -#include "main/ExternalQueue.h" #include "main/PersistentState.h" #include "overlay/BanManager.h" #include "overlay/OverlayManager.h" @@ -63,7 +62,7 @@ bool Database::gDriversRegistered = false; // smallest schema version supported static unsigned long const MIN_SCHEMA_VERSION = 21; -static unsigned long const SCHEMA_VERSION = 23; +static unsigned long const SCHEMA_VERSION = 24; // These should always match our compiled version precisely, since we are // using a bundled version to get access to carray(). But in case someone @@ -219,6 +218,9 @@ Database::applySchemaUpgrade(unsigned long vers) mApp.getHistoryManager().dropSQLBasedPublish(); Upgrades::dropSupportUpgradeHistory(*this); break; + case 24: + getSession() << "DROP TABLE IF EXISTS pubsub;"; + break; default: throw std::runtime_error("Unknown DB schema version"); } @@ -477,7 +479,6 @@ Database::initialize() Upgrades::dropAll(*this); OverlayManager::dropAll(*this); PersistentState::dropAll(*this); - ExternalQueue::dropAll(*this); LedgerHeaderUtils::dropAll(*this); // No need to re-create txhistory, will be dropped during // upgradeToCurrentSchema anyway diff --git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp index d627c71a97..a1fb07475a 100644 --- a/src/herder/test/HerderTests.cpp +++ b/src/herder/test/HerderTests.cpp @@ -212,54 +212,6 @@ TEST_CASE_VERSIONS("standalone", "[herder][acceptance]") REQUIRE(c1.loadSequenceNumber() == expectedC1Seq); } } - - SECTION("Queue processing test") - { - app->getCommandHandler().manualCmd("maintenance?queue=true"); - - while 
(app->getLedgerManager().getLastClosedLedgerNum() < - (app->getHistoryManager().getCheckpointFrequency() + 5)) - { - app->getClock().crank(true); - } - - app->getCommandHandler().manualCmd("setcursor?id=A1&cursor=1"); - app->getCommandHandler().manualCmd("maintenance?queue=true"); - auto& db = app->getDatabase(); - auto& sess = db.getSession(); - - app->getCommandHandler().manualCmd("setcursor?id=A2&cursor=3"); - app->getCommandHandler().manualCmd("maintenance?queue=true"); - auto lh = LedgerHeaderUtils::loadBySequence(db, sess, 2); - REQUIRE(!!lh); - - app->getCommandHandler().manualCmd("setcursor?id=A1&cursor=2"); - // this should delete items older than sequence 2 - app->getCommandHandler().manualCmd("maintenance?queue=true"); - lh = LedgerHeaderUtils::loadBySequence(db, sess, 2); - REQUIRE(!lh); - lh = LedgerHeaderUtils::loadBySequence(db, sess, 3); - REQUIRE(!!lh); - - // this should delete items older than sequence 3 - SECTION("set min to 3 by update") - { - app->getCommandHandler().manualCmd( - "setcursor?id=A1&cursor=3"); - app->getCommandHandler().manualCmd( - "maintenance?queue=true"); - lh = LedgerHeaderUtils::loadBySequence(db, sess, 3); - REQUIRE(!lh); - } - SECTION("set min to 3 by deletion") - { - app->getCommandHandler().manualCmd("dropcursor?id=A1"); - app->getCommandHandler().manualCmd( - "maintenance?queue=true"); - lh = LedgerHeaderUtils::loadBySequence(db, sess, 3); - REQUIRE(!lh); - } - } } }); } diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index b880f8ea69..059aa19a97 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -16,7 +16,7 @@ #include "historywork/GzipFileWork.h" #include "historywork/PutHistoryArchiveStateWork.h" #include "ledger/LedgerManager.h" -#include "main/ExternalQueue.h" +#include "main/Maintainer.h" #include "main/PersistentState.h" #include "process/ProcessManager.h" #include "test/TestAccount.h" @@ -1375,8 +1375,7 @@ TEST_CASE("persist publish 
queue", "[history][publish][acceptance]") REQUIRE(hm0.getMinLedgerQueuedToPublish() == 7); // Trim history after publishing. - ExternalQueue ps(*app0); - ps.deleteOldEntries(50000); + app0->getMaintainer().performMaintenance(50000); } cfg.MAX_CONCURRENT_SUBPROCESSES = 32; @@ -1395,8 +1394,7 @@ TEST_CASE("persist publish queue", "[history][publish][acceptance]") clock.crank(true); // Trim history after publishing whenever possible. - ExternalQueue ps(*app1); - ps.deleteOldEntries(50000); + app1->getMaintainer().performMaintenance(50000); } // We should have either an empty publish queue or a // ledger sometime after the 5th checkpoint diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp index 8c9b2beafe..482ceef177 100644 --- a/src/main/ApplicationImpl.cpp +++ b/src/main/ApplicationImpl.cpp @@ -38,7 +38,6 @@ #include "main/AppConnector.h" #include "main/ApplicationUtils.h" #include "main/CommandHandler.h" -#include "main/ExternalQueue.h" #include "main/Maintainer.h" #include "main/StellarCoreVersion.h" #include "medida/counter.h" @@ -720,9 +719,6 @@ ApplicationImpl::startServices() { // restores Herder's state before starting overlay mHerder->start(); - // set known cursors before starting maintenance job - ExternalQueue ps(*this); - ps.setInitialCursors(mConfig.KNOWN_CURSORS); mMaintainer->start(); if (mConfig.MODE_AUTO_STARTS_OVERLAY) { diff --git a/src/main/CommandHandler.cpp b/src/main/CommandHandler.cpp index c2b61e87cb..a8602c0973 100644 --- a/src/main/CommandHandler.cpp +++ b/src/main/CommandHandler.cpp @@ -36,8 +36,6 @@ #include "xdr/Stellar-transaction.h" #include "xdrpp/marshal.h" -#include "ExternalQueue.h" - #ifdef BUILD_TESTS #include "simulation/LoadGenerator.h" #include "test/TestAccount.h" @@ -89,9 +87,6 @@ CommandHandler::CommandHandler(Application& app) : mApp(app) mServer->add404(std::bind(&CommandHandler::fileNotFound, this, _1, _2)); if (mApp.getConfig().modeStoresAnyHistory()) { - addRoute("dropcursor", 
&CommandHandler::dropcursor); - addRoute("getcursor", &CommandHandler::getcursor); - addRoute("setcursor", &CommandHandler::setcursor); addRoute("maintenance", &CommandHandler::maintenance); } @@ -1010,77 +1005,6 @@ CommandHandler::tx(std::string const& params, std::string& retStr) retStr = Json::FastWriter().write(root); } -void -CommandHandler::dropcursor(std::string const& params, std::string& retStr) -{ - ZoneScoped; - std::map map; - http::server::server::parseParams(params, map); - std::string const& id = map["id"]; - - if (!ExternalQueue::validateResourceID(id)) - { - retStr = "Invalid resource id"; - } - else - { - ExternalQueue ps(mApp); - ps.deleteCursor(id); - retStr = "Done"; - } -} - -void -CommandHandler::setcursor(std::string const& params, std::string& retStr) -{ - ZoneScoped; - std::map map; - http::server::server::parseParams(params, map); - std::string const& id = map["id"]; - - uint32 cursor = parseRequiredParam(map, "cursor"); - - if (!ExternalQueue::validateResourceID(id)) - { - retStr = "Invalid resource id"; - } - else - { - ExternalQueue ps(mApp); - ps.setCursorForResource(id, cursor); - retStr = "Done"; - } -} - -void -CommandHandler::getcursor(std::string const& params, std::string& retStr) -{ - ZoneScoped; - Json::Value root; - std::map map; - http::server::server::parseParams(params, map); - std::string const& id = map["id"]; - - // the decision was made not to check validity here - // because there are subsequent checks for that in - // ExternalQueue and if an exception is thrown for - // validity there, the ret format is technically more - // correct for the mime type - ExternalQueue ps(mApp); - std::map curMap; - int counter = 0; - ps.getCursorForResource(id, curMap); - root["cursors"][0]; - for (auto cursor : curMap) - { - root["cursors"][counter]["id"] = cursor.first; - root["cursors"][counter]["cursor"] = cursor.second; - counter++; - } - - retStr = root.toStyledString(); -} - void CommandHandler::maintenance(std::string const& 
params, std::string& retStr) { diff --git a/src/main/CommandHandler.h b/src/main/CommandHandler.h index d259730286..8474f07a89 100644 --- a/src/main/CommandHandler.h +++ b/src/main/CommandHandler.h @@ -49,7 +49,6 @@ class CommandHandler void bans(std::string const& params, std::string& retStr); void connect(std::string const& params, std::string& retStr); - void dropcursor(std::string const& params, std::string& retStr); void dropPeer(std::string const& params, std::string& retStr); void info(std::string const& params, std::string& retStr); void ll(std::string const& params, std::string& retStr); @@ -61,8 +60,6 @@ class CommandHandler void peers(std::string const& params, std::string& retStr); void selfCheck(std::string const&, std::string& retStr); void quorum(std::string const& params, std::string& retStr); - void setcursor(std::string const& params, std::string& retStr); - void getcursor(std::string const& params, std::string& retStr); void scpInfo(std::string const& params, std::string& retStr); void tx(std::string const& params, std::string& retStr); void unban(std::string const& params, std::string& retStr); diff --git a/src/main/Config.cpp b/src/main/Config.cpp index 9e5ff11363..9bbecbad16 100644 --- a/src/main/Config.cpp +++ b/src/main/Config.cpp @@ -8,7 +8,6 @@ #include "herder/Herder.h" #include "history/HistoryArchive.h" #include "ledger/LedgerManager.h" -#include "main/ExternalQueue.h" #include "main/StellarCoreVersion.h" #include "scp/LocalNode.h" #include "scp/QuorumSetUtils.h" @@ -1133,18 +1132,6 @@ Config::processConfig(std::shared_ptr t) [&]() { BUCKETLIST_DB_PERSIST_INDEX = readBool(item); }}, {"METADATA_DEBUG_LEDGERS", [&]() { METADATA_DEBUG_LEDGERS = readInt(item); }}, - {"KNOWN_CURSORS", - [&]() { - KNOWN_CURSORS = readArray(item); - for (auto const& c : KNOWN_CURSORS) - { - if (!ExternalQueue::validateResourceID(c)) - { - throw std::invalid_argument(fmt::format( - FMT_STRING("invalid cursor: \"{}\""), c)); - } - } - }}, {"RUN_STANDALONE", 
[&]() { RUN_STANDALONE = readBool(item); }}, {"CATCHUP_COMPLETE", [&]() { CATCHUP_COMPLETE = readBool(item); }}, diff --git a/src/main/Config.h b/src/main/Config.h index 0bd69449c1..97d8e6fb9b 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -546,9 +546,6 @@ class Config : public std::enable_shared_from_this // at a premium. uint32_t METADATA_DEBUG_LEDGERS; - // Set of cursors added at each startup with value '1'. - std::vector KNOWN_CURSORS; - // maximum protocol version supported by the application, can be overridden // in tests uint32_t LEDGER_PROTOCOL_VERSION; diff --git a/src/main/ExternalQueue.cpp b/src/main/ExternalQueue.cpp deleted file mode 100644 index 61578971cb..0000000000 --- a/src/main/ExternalQueue.cpp +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2015 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ExternalQueue.h" - -#include "Application.h" -#include "database/Database.h" -#include "ledger/LedgerManager.h" -#include "util/GlobalChecks.h" -#include "util/Logging.h" -#include -#include -#include - -namespace stellar -{ - -using namespace std; - -string ExternalQueue::kSQLCreateStatement = - "CREATE TABLE IF NOT EXISTS pubsub (" - "resid CHARACTER(32) PRIMARY KEY," - "lastread INTEGER" - "); "; - -ExternalQueue::ExternalQueue(Application& app) : mApp(app) -{ -} - -void -ExternalQueue::dropAll(Database& db) -{ - db.getSession() << "DROP TABLE IF EXISTS pubsub;"; - - soci::statement st = db.getSession().prepare << kSQLCreateStatement; - st.execute(true); -} - -bool -ExternalQueue::validateResourceID(std::string const& resid) -{ - static std::regex re("^[A-Z][A-Z0-9]{0,31}$"); - return std::regex_match(resid, re); -} - -void -ExternalQueue::setInitialCursors(std::vector const& initialResids) -{ - for (auto const& resid : initialResids) - { - addCursorForResource(resid, 1); - 
} -} - -void -ExternalQueue::addCursorForResource(std::string const& resid, uint32 cursor) -{ - ZoneScoped; - if (getCursor(resid).empty()) - { - setCursorForResource(resid, cursor); - } -} - -void -ExternalQueue::setCursorForResource(std::string const& resid, uint32 cursor) -{ - ZoneScoped; - checkID(resid); - - std::string old(getCursor(resid)); - if (old.empty()) - { - ZoneNamedN(insertPubsubZone, "insert pubsub", true); - auto prep = mApp.getDatabase().getPreparedStatement( - "INSERT INTO pubsub (resid, lastread) VALUES (:n, :v);"); - auto& st = prep.statement(); - st.exchange(soci::use(resid)); - st.exchange(soci::use(cursor)); - st.define_and_bind(); - st.execute(true); - if (st.get_affected_rows() != 1) - { - throw std::runtime_error("Could not insert data in SQL"); - } - } - else - { - auto prep = mApp.getDatabase().getPreparedStatement( - "UPDATE pubsub SET lastread = :v WHERE resid = :n;"); - - auto& st = prep.statement(); - st.exchange(soci::use(cursor)); - st.exchange(soci::use(resid)); - st.define_and_bind(); - { - ZoneNamedN(updatePubsubZone, "update pubsub", true); - st.execute(true); - } - } -} - -void -ExternalQueue::getCursorForResource(std::string const& resid, - std::map& curMap) -{ - ZoneScoped; - // no resid set, get all cursors - if (resid.empty()) - { - std::string n; - uint32_t v; - - auto& db = mApp.getDatabase(); - auto prep = - db.getPreparedStatement("SELECT resid, lastread FROM pubsub;"); - auto& st = prep.statement(); - st.exchange(soci::into(n)); - st.exchange(soci::into(v)); - st.define_and_bind(); - { - ZoneNamedN(selectPubsubZone, "select pubsub", true); - st.execute(true); - } - - while (st.got_data()) - { - curMap[n] = v; - st.fetch(); - } - } - else - { - // if resid is set attempt to look up the cursor - // and add it to the map if anything is found - std::string cursor = getCursor(resid); - if (!cursor.empty()) - { - curMap[resid] = strtoul(cursor.c_str(), NULL, 0); - } - } -} - -void -ExternalQueue::deleteCursor(std::string 
const& resid) -{ - ZoneScoped; - checkID(resid); - - { - ZoneNamedN(deletePubsubZone, "delete pubsub", true); - auto prep = mApp.getDatabase().getPreparedStatement( - "DELETE FROM pubsub WHERE resid = :n;"); - auto& st = prep.statement(); - st.exchange(soci::use(resid)); - st.define_and_bind(); - st.execute(true); - } -} - -void -ExternalQueue::deleteOldEntries(uint32 count) -{ - ZoneScoped; - auto& db = mApp.getDatabase(); - int m; - soci::indicator minIndicator; - soci::statement st = - (db.getSession().prepare << "SELECT MIN(lastread) FROM pubsub", - soci::into(m, minIndicator)); - { - ZoneNamedN(selectPubsubZone, "select pubsub", true); - st.execute(true); - } - - // rmin is the minimum of all last-reads, which means that remote - // subscribers are ok with us deleting any history N <= rmin. - // If we do not have subscribers, take this as maxint, and just - // use the LCL/checkpoint number (see below) to control trimming. - uint32_t rmin = std::numeric_limits::max(); - if (st.got_data() && minIndicator == soci::indicator::i_ok) - { - rmin = static_cast(m); - } - - // Next calculate the minimum of the LCL and/or any queued checkpoint. - uint32_t lcl = mApp.getLedgerManager().getLastClosedLedgerNum(); - uint32_t ql = mApp.getHistoryManager().getMinLedgerQueuedToPublish(); - uint32_t qmin = ql == 0 ? lcl : std::min(ql, lcl); - - // Next calculate, given qmin, the first ledger it'd be _safe to - // delete_ while still keeping everything required to publish. - // So if qmin is (for example) 0x7f = 127, then we want to keep 64 - // ledgers before that, and therefore can erase 0x3f = 63 and less. - uint32_t freq = mApp.getHistoryManager().getCheckpointFrequency(); - uint32_t lmin = qmin >= freq ? qmin - freq : 0; - - // Cumulative minimum is the lesser of the requirements of history - // publication and the requirements of our pubsub subscribers. 
- uint32_t cmin = std::min(lmin, rmin); - - CLOG_INFO(History, - "Trimming history <= ledger {} (rmin={}, qmin={}, lmin={})", cmin, - rmin, qmin, lmin); - - mApp.getLedgerManager().deleteOldEntries(mApp.getDatabase(), cmin, count); -} - -void -ExternalQueue::checkID(std::string const& resid) -{ - if (!validateResourceID(resid)) - { - throw std::invalid_argument("invalid resource ID"); - } -} - -std::string -ExternalQueue::getCursor(std::string const& resid) -{ - ZoneScoped; - checkID(resid); - std::string res; - - auto& db = mApp.getDatabase(); - auto prep = db.getPreparedStatement( - "SELECT lastread FROM pubsub WHERE resid = :n;"); - auto& st = prep.statement(); - st.exchange(soci::into(res)); - st.exchange(soci::use(resid)); - st.define_and_bind(); - { - ZoneNamedN(selectPubsubZone, "select pubsub", true); - st.execute(true); - } - - if (!st.got_data()) - { - res.clear(); - } - - return res; -} -} diff --git a/src/main/ExternalQueue.h b/src/main/ExternalQueue.h deleted file mode 100644 index ba11598ba0..0000000000 --- a/src/main/ExternalQueue.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -// Copyright 2015 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "main/Application.h" -#include "xdr/Stellar-types.h" -#include - -namespace stellar -{ - -class ExternalQueue -{ - public: - ExternalQueue(Application& app); - - static void dropAll(Database& db); - - // checks if a given resource ID is well formed - static bool validateResourceID(std::string const& resid); - - // sets initial cursors for given resource (if not already present) - void setInitialCursors(std::vector const& initialResids); - // sets the cursor of a given resource if not already present - void addCursorForResource(std::string const& resid, uint32 cursor); - // sets the cursor of a given resource - void setCursorForResource(std::string const& resid, uint32 cursor); - // gets the cursor of a given resource, gets all cursors of resid is empty - void getCursorForResource(std::string const& resid, - std::map& curMap); - // deletes the subscription for the resource - void deleteCursor(std::string const& resid); - - // safely delete data, maximum count entries from each table - void deleteOldEntries(uint32 count); - - private: - void checkID(std::string const& resid); - std::string getCursor(std::string const& resid); - - static std::string kSQLCreateStatement; - - Application& mApp; -}; -} diff --git a/src/main/Maintainer.cpp b/src/main/Maintainer.cpp index 7d28854c08..0b11d7658b 100644 --- a/src/main/Maintainer.cpp +++ b/src/main/Maintainer.cpp @@ -3,8 +3,9 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "main/Maintainer.h" +#include "ledger/LedgerManager.h" +#include "main/Application.h" #include "main/Config.h" -#include "main/ExternalQueue.h" #include "util/GlobalChecks.h" #include "util/LogSlowExecution.h" #include "util/Logging.h" @@ -72,7 +73,21 @@ Maintainer::performMaintenance(uint32_t count) "performance issue: check database or perform a large manual " "maintenance followed by database 
maintenance. Maintenance took", std::chrono::seconds{2}); - ExternalQueue ps{mApp}; - ps.deleteOldEntries(count); + + // Calculate the minimum of the LCL and/or any queued checkpoint. + uint32_t lcl = mApp.getLedgerManager().getLastClosedLedgerNum(); + uint32_t ql = mApp.getHistoryManager().getMinLedgerQueuedToPublish(); + uint32_t qmin = ql == 0 ? lcl : std::min(ql, lcl); + + // Next calculate, given qmin, the first ledger it'd be _safe to + // delete_ while still keeping everything required to publish. + // So if qmin is (for example) 0x7f = 127, then we want to keep 64 + // ledgers before that, and therefore can erase 0x3f = 63 and less. + uint32_t freq = mApp.getHistoryManager().getCheckpointFrequency(); + uint32_t lmin = qmin >= freq ? qmin - freq : 0; + + CLOG_INFO(History, "Trimming history <= ledger {}", lmin); + + mApp.getLedgerManager().deleteOldEntries(mApp.getDatabase(), lmin, count); } } diff --git a/src/main/test/ExternalQueueTests.cpp b/src/main/test/ExternalQueueTests.cpp deleted file mode 100644 index e5af50427f..0000000000 --- a/src/main/test/ExternalQueueTests.cpp +++ /dev/null @@ -1,46 +0,0 @@ -#ifdef USE_POSTGRES -// Copyright 2014 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "lib/catch.hpp" -#include "main/Application.h" -#include "main/CommandHandler.h" -#include "main/Config.h" -#include "main/ExternalQueue.h" -#include "simulation/Simulation.h" -#include "test/TestUtils.h" -#include "test/test.h" - -using namespace stellar; - -TEST_CASE("cursors", "[externalqueue]") -{ - VirtualClock clock; - Config const& cfg = getTestConfig(0, Config::TESTDB_POSTGRESQL); - Application::pointer app = createTestApplication(clock, cfg); - - ExternalQueue ps(*app); - std::map curMap; - app->getCommandHandler().manualCmd("setcursor?id=FOO&cursor=123"); - app->getCommandHandler().manualCmd("setcursor?id=BAR&cursor=456"); - - SECTION("get non-existent cursor") - { - ps.getCursorForResource("NONEXISTENT", curMap); - REQUIRE(curMap.size() == 0); - } - - SECTION("get single cursor") - { - ps.getCursorForResource("FOO", curMap); - REQUIRE(curMap.size() == 1); - } - - SECTION("get all cursors") - { - ps.getCursorForResource("", curMap); - REQUIRE(curMap.size() == 2); - } -} -#endif From 99865fd42674385478ca1144d119429c0df53835 Mon Sep 17 00:00:00 2001 From: Siddharth Suresh Date: Fri, 20 Dec 2024 09:59:55 -0800 Subject: [PATCH 14/17] Update apply-load docs --- docs/software/commands.md | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/docs/software/commands.md b/docs/software/commands.md index f1ee33d208..b1a39e517c 100644 --- a/docs/software/commands.md +++ b/docs/software/commands.md @@ -20,20 +20,17 @@ Common options can be placed at any place in the command line. Command options can only by placed after command. * **apply-load**: Applies Soroban transactions by repeatedly generating transactions and closing -them directly through the LedgerManager. 
The parameters specified below configure the network limits, and -they're all required - **--ledger-max-instructions N**, **--ledger-max-read-entries N**, **--ledger-max-write-entries N**, **--ledger-max-read-byte N**, **--ledger-max-write-bytes N**, **--ledger-max-tx-size N**, **--ledger-max-tx-count N**. This command will generate enough transactions to fill up a synthetic transaction queue (it's just a list of transactions with the same limits as the real queue), and then create a transaction set off of that to +them directly through the LedgerManager. This command will generate enough transactions to fill up a synthetic transaction queue (it's just a list of transactions with the same limits as the real queue), and then create a transaction set off of that to apply. * At the moment, the Soroban transactions are generated using some of the same config parameters as the **generateload** command. Specifically, `ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING=true`, - `LOADGEN_NUM_DATA_ENTRIES_FOR_TESTING`, - `LOADGEN_NUM_DATA_ENTRIES_DISTRIBUTION_FOR_TESTING`, - `LOADGEN_IO_KILOBYTES_FOR_TESTING`, - `LOADGEN_IO_KILOBYTES_DISTRIBUTION_FOR_TESTING`, - `LOADGEN_TX_SIZE_BYTES_FOR_TESTING`, - `LOADGEN_TX_SIZE_BYTES_DISTRIBUTION_FOR_TESTING`, `LOADGEN_INSTRUCTIONS_FOR_TESTING`, and - `LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING`. + `LOADGEN_INSTRUCTIONS_DISTRIBUTION_FOR_TESTING`. In addition to those, you must also set the + limit related settings - `APPLY_LOAD_LEDGER_MAX_INSTRUCTIONS`, `APPLY_LOAD_TX_MAX_INSTRUCTIONS`, `APPLY_LOAD_LEDGER_MAX_READ_LEDGER_ENTRIES`, `APPLY_LOAD_TX_MAX_READ_LEDGER_ENTRIES`, `APPLY_LOAD_LEDGER_MAX_WRITE_LEDGER_ENTRIES`, `APPLY_LOAD_TX_MAX_WRITE_LEDGER_ENTRIES`, `APPLY_LOAD_LEDGER_MAX_READ_BYTES`, `APPLY_LOAD_TX_MAX_READ_BYTES`, `APPLY_LOAD_LEDGER_MAX_WRITE_BYTES`, `APPLY_LOAD_TX_MAX_WRITE_BYTES`, `APPLY_LOAD_MAX_TX_SIZE_BYTES`, `APPLY_LOAD_MAX_LEDGER_TX_SIZE_BYTES`, `APPLY_LOAD_MAX_CONTRACT_EVENT_SIZE_BYTES`, `APPLY_LOAD_MAX_TX_COUNT`. 
+* `apply-load` will also generate a synthetic bucket list using `APPLY_LOAD_BL_SIMULATED_LEDGERS`, `APPLY_LOAD_BL_WRITE_FREQUENCY`, `APPLY_LOAD_BL_BATCH_SIZE`, `APPLY_LOAD_BL_LAST_BATCH_LEDGERS`, `APPLY_LOAD_BL_LAST_BATCH_SIZE`. These have default values set in `Config.h`. +* There are additional `APPLY_LOAD_*` related config settings that can be used to configure +`apply-load`, and you can learn more about these from the comments in `Config.h`. * **catchup **: Perform catchup from history archives without connecting to network. For new instances (with empty history From e4ec43d46862c20cd800afa77da4a267282064dd Mon Sep 17 00:00:00 2001 From: Siddharth Suresh Date: Fri, 20 Dec 2024 12:41:25 -0800 Subject: [PATCH 15/17] Update security protocol release notes --- docs/software/security-protocol-release-notes.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/software/security-protocol-release-notes.md b/docs/software/security-protocol-release-notes.md index b77dd9e5a7..1b6621aab5 100644 --- a/docs/software/security-protocol-release-notes.md +++ b/docs/software/security-protocol-release-notes.md @@ -53,6 +53,10 @@ It then follows that: # List of releases +## v22.0.0 (2024-11-06) + +* `Ledger` - protocol - Fixed an issue with read only TTL extensions not being charged for when initiated within a contract. + ## v20.4.0 (2024-04-08) * `Overlay` - security - remote crash could be triggered due to race condition in error code processing from a third party library. 
From 084e7f2fed024fc5363398f5882c015854ae5a99 Mon Sep 17 00:00:00 2001 From: anupsdf Date: Fri, 20 Dec 2024 15:18:02 -0800 Subject: [PATCH 16/17] Update INVARIANT_CHECKS documentation --- docs/stellar-core_example.cfg | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/stellar-core_example.cfg b/docs/stellar-core_example.cfg index fba9e6d437..77699bbb94 100644 --- a/docs/stellar-core_example.cfg +++ b/docs/stellar-core_example.cfg @@ -425,6 +425,8 @@ RUN_STANDALONE=false # on bucket apply. # Strings specified are matched (as regex) against the list of invariants. # For example, to enable all invariants use ".*" +# INVARIANT_CHECKS = [.*] +# # List of invariants: # - "AccountSubEntriesCountIsValid" # Setting this will cause additional work on each operation apply - it @@ -440,7 +442,8 @@ RUN_STANDALONE=false # detailed information about what is checked see the comment in the header # invariant/BucketListIsConsistentWithDatabase.h. # The overhead may cause a system to catch-up more than once before being -# in sync with the network. +# in sync with the network. This may also increase startup time as it checks +# the entire state of the database. # - "CacheIsConsistentWithDatabase" # Setting this will cause additional work on each operation apply - it # checks if internal cache of ledger entries is consistent with content of @@ -466,7 +469,12 @@ RUN_STANDALONE=false # in the header invariant/LiabilitiesMatchOffers.h. # The overhead may cause slower systems to not perform as fast as the rest # of the network, caution is advised when using this. 
-INVARIANT_CHECKS = [] +INVARIANT_CHECKS = [ "AccountSubEntriesCountIsValid", +"ConservationOfLumens", +"ConstantProductInvariant", +"LedgerEntryIsValid", +"LiabilitiesMatchOffers", +"SponsorshipCountIsValid" ] # MANUAL_CLOSE (true or false) defaults to false From 3f8a1ced6d289883136d50a7d84393e607b062f9 Mon Sep 17 00:00:00 2001 From: Dmytro Kozhevin Date: Mon, 23 Dec 2024 18:01:59 -0500 Subject: [PATCH 17/17] Use re-calibrated costs for loadgen. Also update the `generate soroban load` test to not use the config for genesis and go through the protocol upgrade instead, so that it resembles the supercluster missions closer. --- src/herder/Upgrades.cpp | 10 ++++++++++ src/simulation/test/LoadGeneratorTests.cpp | 19 +++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/herder/Upgrades.cpp b/src/herder/Upgrades.cpp index 689b7d0139..a20e314d10 100644 --- a/src/herder/Upgrades.cpp +++ b/src/herder/Upgrades.cpp @@ -1199,6 +1199,16 @@ Upgrades::applyVersionUpgrade(Application& app, AbstractLedgerTxn& ltx, if (needUpgradeToVersion(SOROBAN_PROTOCOL_VERSION, prevVersion, newVersion)) { SorobanNetworkConfig::createLedgerEntriesForV20(ltx, app); +#ifdef BUILD_TESTS + // Update the costs in case if we're in loadgen mode, so that the costs + // reflect the most recent calibration on p20. This would break + // if we tried to replay the ledger, but we shouldn't be combining load + // generation with the ledger replay. 
+ if (app.getConfig().ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING) + { + SorobanNetworkConfig::updateRecalibratedCostTypesForV20(ltx); + } +#endif } if (needUpgradeToVersion(ProtocolVersion::V_21, prevVersion, newVersion)) { diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp index 5b04e0867d..c5bc9e3879 100644 --- a/src/simulation/test/LoadGeneratorTests.cpp +++ b/src/simulation/test/LoadGeneratorTests.cpp @@ -197,8 +197,9 @@ TEST_CASE("generate soroban load", "[loadgen][soroban]") Simulation::pointer simulation = Topologies::pair(Simulation::OVER_LOOPBACK, networkID, [&](int i) { auto cfg = getTestConfig(i); - cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 5000; - // Use tight bounds to we can verify storage works properly + cfg.USE_CONFIG_FOR_GENESIS = false; + cfg.ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING = true; + // Use tight bounds to we can verify storage works properly cfg.LOADGEN_NUM_DATA_ENTRIES_FOR_TESTING = {numDataEntries}; cfg.LOADGEN_NUM_DATA_ENTRIES_DISTRIBUTION_FOR_TESTING = {1}; cfg.LOADGEN_IO_KILOBYTES_FOR_TESTING = {ioKiloBytes}; @@ -220,6 +221,20 @@ TEST_CASE("generate soroban load", "[loadgen][soroban]") auto nodes = simulation->getNodes(); auto& app = *nodes[0]; // pick a node to generate load + Upgrades::UpgradeParameters scheduledUpgrades; + auto lclCloseTime = + VirtualClock::from_time_t(app.getLedgerManager() + .getLastClosedLedgerHeader() + .header.scpValue.closeTime); + scheduledUpgrades.mUpgradeTime = lclCloseTime; + scheduledUpgrades.mProtocolVersion = + Config::CURRENT_LEDGER_PROTOCOL_VERSION; + for (auto const& node : nodes) + { + node->getHerder().setUpgrades(scheduledUpgrades); + } + simulation->crankForAtLeast(std::chrono::seconds(20), false); + auto& loadGen = app.getLoadGenerator(); auto getSuccessfulTxCount = [&]() { return nodes[0]