Revert "Revert "Revert "Snapshot release triggered compaction without…
Browse files Browse the repository at this point in the history
… multiple tombstones (facebook#8357)" (facebook#8410)" (facebook#8438)"

This reverts commit f89423a.

Signed-off-by: tabokie <[email protected]>
tabokie committed Aug 30, 2022
1 parent 3cd757c commit 40551e2
Showing 4 changed files with 4 additions and 31 deletions.
2 changes: 0 additions & 2 deletions HISTORY.md
@@ -286,8 +286,6 @@ Note: The next release will be major release 7.0. See https://github.com/faceboo
 * BackupEngineOptions::sync (default true) now applies to restoring backups in addition to creating backups. This could slow down restores, but ensures they are fully persisted before returning OK. (Consider increasing max_background_operations to improve performance.)
 
 ## 6.23.0 (2021-07-16)
-### Behavior Changes
-* Obsolete keys in the bottommost level that were preserved for a snapshot will now be cleaned upon snapshot release in all cases. This form of compaction (snapshot release triggered compaction) previously had an artificial limitation that multiple tombstones needed to be present.
 ### Bug Fixes
 * Blob file checksums are now printed in hexadecimal format when using the `manifest_dump` `ldb` command.
 * `GetLiveFilesMetaData()` now populates the `temperature`, `oldest_ancester_time`, and `file_creation_time` fields of its `LiveFileMetaData` results when the information is available. Previously these fields always contained zero indicating unknown.
3 changes: 2 additions & 1 deletion db/version_set.cc
@@ -3319,7 +3319,8 @@ void VersionStorageInfo::ComputeBottommostFilesMarkedForCompaction() {
   bottommost_files_mark_threshold_ = kMaxSequenceNumber;
   for (auto& level_and_file : bottommost_files_) {
     if (!level_and_file.second->being_compacted &&
-        level_and_file.second->fd.largest_seqno != 0) {
+        level_and_file.second->fd.largest_seqno != 0 &&
+        level_and_file.second->num_deletions > 1) {
       // largest_seqno might be nonzero due to containing the final key in an
       // earlier compaction, whose seqnum we didn't zero out. Multiple deletions
       // ensures the file really contains deleted or overwritten keys.
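For readers skimming the diff: the two added lines restore the "multiple tombstones" gate on snapshot-release-triggered compaction. Below is a minimal standalone sketch of the restored rule, using simplified stand-in types rather than the real `FileMetaData`/`VersionStorageInfo`:

```cpp
#include <cstdint>
#include <vector>

// Simplified, hypothetical stand-in for RocksDB's FileMetaData.
struct FileMeta {
  bool being_compacted = false;
  uint64_t largest_seqno = 0;   // 0 => seqnums were zeroed out earlier
  uint64_t num_deletions = 0;   // tombstones recorded for this file
  bool marked_for_compaction = false;
};

// Mark a bottommost file for compaction only if it has a nonzero
// largest_seqno AND more than one deletion -- the gate this revert puts
// back. Per the comment in the diff, a lone nonzero seqno can be a
// leftover from an earlier compaction, so multiple deletions serve as
// evidence that the file really holds deleted or overwritten keys.
void MarkBottommostFiles(std::vector<FileMeta*>& bottommost_files) {
  for (FileMeta* f : bottommost_files) {
    if (!f->being_compacted && f->largest_seqno != 0 &&
        f->num_deletions > 1) {
      f->marked_for_compaction = true;
    }
  }
}
```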
12 changes: 2 additions & 10 deletions utilities/blob_db/blob_db_test.cc
@@ -571,6 +571,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) {
   Random rnd(301);
   BlobDBOptions bdb_options;
   bdb_options.min_blob_size = 0;
+  bdb_options.enable_garbage_collection = true;
   bdb_options.garbage_collection_cutoff = 1.0;
   bdb_options.disable_background_tasks = true;
   bdb_options.compression = kSnappyCompression;
@@ -599,11 +600,6 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) {
   ASSERT_EQ(2, blob_files.size());
   ASSERT_EQ(kNoCompression, blob_files[1]->GetCompressionType());
 
-  // Enable GC. If we do it earlier the snapshot release triggered compaction
-  // may compact files and trigger GC before we can verify there are two files.
-  bdb_options.enable_garbage_collection = true;
-  Reopen(bdb_options);
-
   // Trigger compaction
   ASSERT_OK(blob_db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   blob_db_impl()->TEST_DeleteObsoleteFiles();
@@ -642,6 +638,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) {
   Random rnd(301);
   BlobDBOptions bdb_options;
   bdb_options.min_blob_size = 0;
+  bdb_options.enable_garbage_collection = true;
   bdb_options.garbage_collection_cutoff = 1.0;
   bdb_options.disable_background_tasks = true;
   bdb_options.compression = kLZ4Compression;
@@ -671,11 +668,6 @@ TEST_F(BlobDBTest, ChangeCompressionGC) {
   ASSERT_EQ(2, blob_files.size());
   ASSERT_EQ(kSnappyCompression, blob_files[1]->GetCompressionType());
 
-  // Enable GC. If we do it earlier the snapshot release triggered compaction
-  // may compact files and trigger GC before we can verify there are two files.
-  bdb_options.enable_garbage_collection = true;
-  Reopen(bdb_options);
-
   ASSERT_OK(blob_db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   VerifyDB(data);
 
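The two BlobDB test edits are mirror images of each other: with the tombstone gate restored, releasing a snapshot no longer kicks off a compaction (and hence GC) before the tests can count blob files, so garbage collection can be enabled in the initial options instead of through a mid-test `Reopen`. A sketch of the restored option setup, using only the fields visible in the diff (assumes a RocksDB source checkout for the header and namespace; not a complete test):

```cpp
#include "utilities/blob_db/blob_db.h"

using ROCKSDB_NAMESPACE::blob_db::BlobDBOptions;

BlobDBOptions MakeTestOptions() {
  BlobDBOptions bdb_options;
  bdb_options.min_blob_size = 0;                 // store every value as a blob
  bdb_options.enable_garbage_collection = true;  // on from the start again
  bdb_options.garbage_collection_cutoff = 1.0;   // GC considers all blob files
  bdb_options.disable_background_tasks = true;   // tests drive work manually
  return bdb_options;
}
```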
18 changes: 0 additions & 18 deletions utilities/transactions/write_prepared_transaction_test.cc
@@ -2584,7 +2583,6 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction) {
   const size_t snapshot_cache_bits = 7;  // same as default
   const size_t commit_cache_bits = 0;    // minimum commit cache
   UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits);
-  options.disable_auto_compactions = true;
   ASSERT_OK(ReOpen());
 
   ASSERT_OK(db->Put(WriteOptions(), "key1", "value1_1"));
@@ -2604,13 +2603,7 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction) {
   VerifyKeys({{"key1", "value1_1"}}, snapshot2);
   // Add a flush to avoid compaction to fallback to trivial move.
 
-  // The callback might be called twice, record the calling state to
-  // prevent double calling.
-  bool callback_finished = false;
   auto callback = [&](void*) {
-    if (callback_finished) {
-      return;
-    }
     // Release snapshot1 after CompactionIterator init.
     // CompactionIterator need to figure out the earliest snapshot
     // that can see key1:value1_2 is kMaxSequenceNumber, not
@@ -2619,7 +2612,6 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction) {
     // Add some keys to advance max_evicted_seq.
     ASSERT_OK(db->Put(WriteOptions(), "key3", "value3"));
     ASSERT_OK(db->Put(WriteOptions(), "key4", "value4"));
-    callback_finished = true;
   };
   SyncPoint::GetInstance()->SetCallBack("CompactionIterator:AfterInit",
                                         callback);
@@ -2641,7 +2633,6 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction2) {
   const size_t snapshot_cache_bits = 7;  // same as default
   const size_t commit_cache_bits = 0;    // minimum commit cache
   UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits);
-  options.disable_auto_compactions = true;
   ASSERT_OK(ReOpen());
 
   ASSERT_OK(db->Put(WriteOptions(), "key1", "value1"));
@@ -2692,7 +2683,6 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction3) {
   const size_t snapshot_cache_bits = 7;  // same as default
   const size_t commit_cache_bits = 1;    // commit cache size = 2
   UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits);
-  options.disable_auto_compactions = true;
   ASSERT_OK(ReOpen());
 
   // Add a dummy key to evict v2 commit cache, but keep v1 commit cache.
@@ -2722,18 +2712,11 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction3) {
   add_dummy();
   auto* s2 = db->GetSnapshot();
 
-  // The callback might be called twice, record the calling state to
-  // prevent double calling.
-  bool callback_finished = false;
   auto callback = [&](void*) {
-    if (callback_finished) {
-      return;
-    }
     db->ReleaseSnapshot(s1);
     // Add some dummy entries to trigger s1 being cleanup from old_commit_map.
     add_dummy();
     add_dummy();
-    callback_finished = true;
   };
   SyncPoint::GetInstance()->SetCallBack("CompactionIterator:AfterInit",
                                         callback);
@@ -2751,7 +2734,6 @@ TEST_P(WritePreparedTransactionTest, ReleaseEarliestSnapshotDuringCompaction) {
   const size_t snapshot_cache_bits = 7;  // same as default
   const size_t commit_cache_bits = 0;    // minimum commit cache
   UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits);
-  options.disable_auto_compactions = true;
   ASSERT_OK(ReOpen());
 
   ASSERT_OK(db->Put(WriteOptions(), "key1", "value1"));
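The deletions in the transaction tests all served one purpose. The removed comments note the callback "might be called twice": without the tombstone gate, snapshot release could trigger an extra compaction, so the tests disabled auto compactions and guarded the `CompactionIterator:AfterInit` callback against repeat invocations. With the gate restored, the sync point fires once and both workarounds are dropped. A minimal standalone sketch of the removed one-shot guard idiom (simplified; not the actual test):

```cpp
#include <cassert>
#include <functional>

int main() {
  int body_runs = 0;
  // The removed guard: record that the body already ran so that repeat
  // invocations of the sync-point callback become no-ops.
  bool callback_finished = false;
  std::function<void(void*)> callback = [&](void*) {
    if (callback_finished) {
      return;  // second and later calls do nothing
    }
    ++body_runs;  // stands in for ReleaseSnapshot(s1) + dummy Puts
    callback_finished = true;
  };
  callback(nullptr);
  callback(nullptr);  // e.g. an extra compaction's invocation
  assert(body_runs == 1);
  return 0;
}
```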
