From 16d94642725ed65864fd535d012c4fdfcdc812dc Mon Sep 17 00:00:00 2001
From: wasm-forge <122647775+wasm-forge@users.noreply.github.com>
Date: Mon, 16 Sep 2024 21:47:10 +0200
Subject: [PATCH] sparse file support

---
 src/fs.rs                        |  36 ++++-
 src/runtime/structure_helpers.rs |   8 ++
 src/runtime/types.rs             |   7 +-
 src/storage.rs                   |   1 +
 src/storage/dummy.rs             |   1 +
 src/storage/iterator.rs          | 239 +++++++++++++++++++++++++++++++
 src/storage/journal.rs           |   2 +
 src/storage/stable.rs            | 147 ++++++++++---------
 src/storage/transient.rs         |   2 +
 src/storage/types.rs             | 126 +++++++++++++++-
 10 files changed, 497 insertions(+), 72 deletions(-)
 create mode 100644 src/storage/iterator.rs

diff --git a/src/fs.rs b/src/fs.rs
index b92e7da..d523274 100644
--- a/src/fs.rs
+++ b/src/fs.rs
@@ -547,6 +547,7 @@ impl FileSystem {
 
 #[cfg(test)]
 mod tests {
+    use ic_stable_structures::memory_manager::{MemoryId, MemoryManager};
     use ic_stable_structures::{Memory, VectorMemory};
 
     use crate::{
@@ -556,7 +557,10 @@ mod tests {
             structure_helpers::find_node,
             types::{FdStat, OpenFlags},
         },
-        storage::types::{FileSize, FileType},
+        storage::{
+            stable::StableStorage,
+            types::{FileSize, FileType},
+        },
         test_utils::{
             new_vector_memory, read_text_file, test_fs, test_fs_setups, test_fs_transient,
             write_text_fd, write_text_file,
@@ -1353,6 +1357,32 @@ mod tests {
         println!("{:?}", buf);
     }
 
+    #[test]
+    fn reading_mounted_memory_after_upgrade() {
+        let memory_manager = MemoryManager::init(new_vector_memory());
+        let memory = memory_manager.get(MemoryId::new(1));
+
+        let storage = StableStorage::new_with_memory_manager(&memory_manager, 200..210);
+        let mut fs = FileSystem::new(Box::new(storage)).unwrap();
+        fs.mount_memory_file("test.txt", Box::new(memory.clone()))
+            .unwrap();
+
+        let root_fd = fs.root_fd();
+        let content = "ABCDEFG123";
+        write_text_file(&mut fs, root_fd, "test.txt", content, 2).unwrap();
+
+        // imitate canister upgrade (we keep the memory manager but recreate the file system with the same virtual memories)
+        let storage = StableStorage::new_with_memory_manager(&memory_manager, 200..210);
+        let mut fs = FileSystem::new(Box::new(storage)).unwrap();
+        fs.mount_memory_file("test.txt", Box::new(memory.clone()))
+            .unwrap();
+        let root_fd = fs.root_fd();
+
+        let content = read_text_file(&mut fs, root_fd, "test.txt", 0, 100);
+
+        assert_eq!(content, "ABCDEFG123ABCDEFG123");
+    }
+
     #[test]
     fn deleting_mounted_file_fails() {
         let memory: VectorMemory = new_vector_memory();
@@ -1367,7 +1397,7 @@ mod tests {
         let res = fs.remove_file(root_fd, "test.txt");
 
         assert!(
-            res.is_err(),
+            res == Err(Error::CannotRemoveMountedMemoryFile),
            "Deleting a mounted file should not be allowed!"
         );
@@ -1478,4 +1508,6 @@ mod tests {
             assert!(res.is_err());
         }
     }
+
+    // test sparse files
 }
diff --git a/src/runtime/structure_helpers.rs b/src/runtime/structure_helpers.rs
index ace6a8a..8ac52b1 100644
--- a/src/runtime/structure_helpers.rs
+++ b/src/runtime/structure_helpers.rs
@@ -118,6 +118,13 @@ pub fn create_dir_entry(
     }
 
     let node = storage.new_node();
+
+    let chunk_type = if entry_type == FileType::RegularFile {
+        Some(storage.chunk_type())
+    } else {
+        None
+    };
+
     storage.put_metadata(
         node,
         Metadata {
@@ -132,6 +139,7 @@ pub fn create_dir_entry(
             },
             first_dir_entry: None,
             last_dir_entry: None,
+            chunk_type,
         },
     );
 
diff --git a/src/runtime/types.rs b/src/runtime/types.rs
index cb5e9ae..543faed 100644
--- a/src/runtime/types.rs
+++ b/src/runtime/types.rs
@@ -1,4 +1,5 @@
 use bitflags::bitflags;
+use serde::{Deserialize, Serialize};
 
 #[derive(Copy, Clone, Debug)]
 pub struct FdStat {
@@ -43,10 +44,10 @@ impl ChunkSize {
     ];
 }
 
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub enum ChunkType {
-    V1,
-    V2,
+    V1 = 1,
+    V2 = 2,
 }
 
 bitflags! {
diff --git a/src/storage.rs b/src/storage.rs
index ef50dc4..687c785 100644
--- a/src/storage.rs
+++ b/src/storage.rs
@@ -9,6 +9,7 @@ use crate::{
 
 mod allocator;
 pub mod dummy;
+mod iterator;
 mod journal;
 pub mod stable;
 pub mod transient;
diff --git a/src/storage/dummy.rs b/src/storage/dummy.rs
index da38e2b..a9b1931 100644
--- a/src/storage/dummy.rs
+++ b/src/storage/dummy.rs
@@ -137,6 +137,7 @@ mod tests {
                 times: Times::default(),
                 first_dir_entry: Some(42),
                 last_dir_entry: Some(24),
+                chunk_type: None,
             },
         )
     }
diff --git a/src/storage/iterator.rs b/src/storage/iterator.rs
new file mode 100644
index 0000000..73506d8
--- /dev/null
+++ b/src/storage/iterator.rs
@@ -0,0 +1,239 @@
+use crate::storage::types::FileChunkIndex;
+use crate::storage::types::FileChunkPtr;
+use crate::storage::FileSize;
+use crate::storage::Node;
+use ic_stable_structures;
+use ic_stable_structures::memory_manager::VirtualMemory;
+use ic_stable_structures::BTreeMap;
+use ic_stable_structures::Memory;
+use std::collections::HashMap;
+
+pub(crate) struct ChunkV2Iterator<'a, M: Memory> {
+    node: Node,
+    last_index_excluded: FileChunkIndex,
+    cur_index: FileChunkIndex,
+
+    is_prefetched: bool,
+    prefetched_pointers: HashMap<(Node, FileChunkIndex), FileChunkPtr>,
+
+    last_index: (Node, FileChunkIndex, FileChunkPtr),
+    v2_chunk_ptr: &'a mut BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory<M>>,
+}
+
+impl<'a, M: Memory> ChunkV2Iterator<'a, M> {
+    pub fn new(
+        node: Node,
+        offset: FileSize,
+        file_size: FileSize,
+        chunk_size: FileSize,
+        last_index: (Node, FileChunkIndex, FileChunkPtr),
+        v2_chunk_ptr: &'a mut BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory<M>>,
+    ) -> Self {
+        let cur_index = (offset / chunk_size) as FileChunkIndex;
+        let last_index_excluded = (file_size / chunk_size + 1) as FileChunkIndex;
+
+        Self {
+            node,
+            last_index_excluded,
+            cur_index,
+            is_prefetched: false,
+            prefetched_pointers: HashMap::new(),
+            last_index,
+            v2_chunk_ptr,
+        }
+    }
+}
+
+impl<'a, M: Memory> Iterator for ChunkV2Iterator<'a, M> {
+    type Item = ((Node, FileChunkIndex), Option<FileChunkPtr>);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // we are at the end of the list, return None
+        if self.cur_index >= self.last_index_excluded {
+            return None;
+        }
+
+        // try to get the cached item first
+        let last = self.last_index;
+        if last.0 == self.node && last.1 == self.cur_index {
+            let res = Some(((self.node, self.cur_index), Some(last.2)));
+            self.cur_index += 1;
+            // return cached value
+            return res;
+        }
+
+        // cache failed, resort to reading the ranged values from the iterator
+        if !self.is_prefetched {
+            let range = (self.node, self.cur_index)..(self.node, self.last_index_excluded);
+            let items = self.v2_chunk_ptr.range(range);
+
+            for (k, v) in items {
+                self.prefetched_pointers.insert(k, v);
+            }
+
+            self.is_prefetched = true;
+        }
+
+        let found: Option<FileChunkPtr> = self
+            .prefetched_pointers
+            .get(&(self.node, self.cur_index))
+            .copied();
+
+        let res = Some(((self.node, self.cur_index), found));
+
+        self.cur_index += 1;
+
+        res
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::fs::FileSize;
+    use crate::storage::iterator::ChunkV2Iterator;
+    use crate::storage::stable::StableStorage;
+    use crate::storage::types::{FileType, Metadata, Node, Times};
+    use crate::storage::Storage;
+    use crate::test_utils::new_vector_memory;
+    use ic_stable_structures::Memory;
+
+    fn create_file_with_size<M: Memory>(size: FileSize, storage: &mut StableStorage<M>) -> Node {
+        let node = storage.new_node();
+
+        storage.put_metadata(
+            node,
+            Metadata {
+                node,
+                file_type: FileType::RegularFile,
+                link_count: 1,
+                size,
+                times: Times::default(),
+                first_dir_entry: Some(42),
+                last_dir_entry: Some(24),
+                chunk_type: Some(storage.chunk_type()),
+            },
+        );
+        node
+    }
+
+    #[test]
+    fn iterate_short_file() {
+        let mut storage = StableStorage::new(new_vector_memory());
+        let node = create_file_with_size(0, &mut storage);
+        let write_size = storage.chunk_size() * 3 - 100;
+
+        let buf = vec![142u8; write_size];
+
+        storage.write(node, 0, &*buf).unwrap();
+
+        let meta = storage.get_metadata(node).unwrap();
+        let file_size = meta.size;
+
+        let iterator = ChunkV2Iterator::new(
+            node,
+            30,
+            file_size,
+            storage.chunk_size() as FileSize,
+            storage.last_index,
+            &mut storage.v2_chunk_ptr,
+        );
+
+        let res_vec: Vec<_> = iterator.collect();
+
+        assert!(res_vec[0].1.is_some());
+        assert!(res_vec[1].1.is_some());
+        assert!(res_vec[2].1.is_some());
+
+        println!("{:?}", res_vec);
+    }
+
+    #[test]
+    fn iterate_file_with_size_and_no_stored_chunks() {
+        let mut storage = StableStorage::new(new_vector_memory());
+        let write_size = (storage.chunk_size() * 3 - 100) as FileSize;
+
+        let node = create_file_with_size(write_size, &mut storage);
+
+        let meta = storage.get_metadata(node).unwrap();
+        let file_size = meta.size;
+
+        let iterator = ChunkV2Iterator::new(
+            node,
+            30,
+            file_size,
+            storage.chunk_size() as FileSize,
+            storage.last_index,
+            &mut storage.v2_chunk_ptr,
+        );
+
+        let res_vec: Vec<_> = iterator.collect();
+
+        assert!(res_vec[0].1.is_none());
+        assert!(res_vec[1].1.is_none());
+        assert!(res_vec[2].1.is_none());
+
+        println!("{:?}", res_vec);
+    }
+
+    #[test]
+    fn iterate_file_missing_chunk_in_the_middle() {
+        let mut storage = StableStorage::new(new_vector_memory());
+        let node = create_file_with_size(0, &mut storage);
+
+        let write_size = (storage.chunk_size() * 3 - 200) as FileSize;
+
+        storage.write(node, 10, &[142u8; 100]).unwrap();
+        storage.write(node, write_size, &[142u8; 100]).unwrap();
+
+        let meta = storage.get_metadata(node).unwrap();
+        let file_size = meta.size;
+
+        let iterator = ChunkV2Iterator::new(
+            node,
+            30,
+            file_size,
+            storage.chunk_size() as FileSize,
+            storage.last_index,
+            &mut storage.v2_chunk_ptr,
+        );
+
+        let res_vec: Vec<_> = iterator.collect();
+
+        println!("{:?}", res_vec);
+
+        assert!(res_vec[0].1.is_some());
+        assert!(res_vec[1].1.is_none());
+        assert!(res_vec[2].1.is_some());
+    }
+
+    #[test]
+    fn iterate_file_only_middle_chunk_is_present() {
+        let mut storage = StableStorage::new(new_vector_memory());
+        let file_size = (storage.chunk_size() * 3 - 200) as FileSize;
+        let node = create_file_with_size(file_size, &mut storage);
+
+        let write_size = (storage.chunk_size() * 2 - 200) as FileSize;
+
+        storage.write(node, write_size, &[142u8; 102]).unwrap();
+
+        let meta = storage.get_metadata(node).unwrap();
+        let file_size = meta.size;
+
+        let iterator = ChunkV2Iterator::new(
+            node,
+            30,
+            file_size,
+            storage.chunk_size() as FileSize,
+            storage.last_index,
+            &mut storage.v2_chunk_ptr,
+        );
+
+        let res_vec: Vec<_> = iterator.collect();
+
+        println!("{:?}", res_vec);
+
+        assert!(res_vec[0].1.is_none());
+        assert!(res_vec[1].1.is_some());
+        assert!(res_vec[2].1.is_none());
+    }
+}
diff --git a/src/storage/journal.rs b/src/storage/journal.rs
index 2ee663c..7c3072d 100644
--- a/src/storage/journal.rs
+++ b/src/storage/journal.rs
@@ -132,6 +132,7 @@ mod tests {
             },
             first_dir_entry: None,
             last_dir_entry: Some(876),
+            chunk_type: None,
         };
 
         let mut node2 = 0;
@@ -224,6 +225,7 @@ mod tests {
             },
             first_dir_entry: None,
             last_dir_entry: Some(876),
+            chunk_type: None,
         };
 
         journal.write_mounted_meta(&123, &meta);
diff --git a/src/storage/stable.rs b/src/storage/stable.rs
index f73ef51..456f289 100644
--- a/src/storage/stable.rs
+++ b/src/storage/stable.rs
@@ -17,6 +17,7 @@ use crate::{
 
 use super::{
     allocator::ChunkPtrAllocator,
+    iterator::ChunkV2Iterator,
     journal::CacheJournal,
     types::{
         DirEntry, DirEntryIndex, FileChunk, FileChunkIndex, FileChunkPtr, FileSize, FileType,
@@ -79,7 +80,7 @@ pub struct StableStorage<M: Memory> {
     filechunk: BTreeMap<(Node, FileChunkIndex), FileChunk, VirtualMemory<M>>,
     mounted_meta: BTreeMap<Node, Metadata, VirtualMemory<M>>,
 
-    v2_chunk_ptr: BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory<M>>,
+    pub(crate) v2_chunk_ptr: BTreeMap<(Node, FileChunkIndex), FileChunkPtr, VirtualMemory<M>>,
     v2_chunks: VirtualMemory<M>,
     v2_allocator: ChunkPtrAllocator<M>,
 
@@ -94,7 +95,7 @@ pub struct StableStorage<M: Memory> {
     chunk_type: ChunkType,
 
     // primitive cache
-    last_index: (Node, FileChunkIndex, FileChunkPtr),
+    pub(crate) last_index: (Node, FileChunkIndex, FileChunkPtr),
 
     // only use it with normal files (not mounted)
     last_metadata: (Node, Metadata),
@@ -225,6 +226,7 @@ impl<M: Memory> StableStorage<M> {
                 times: Times::default(),
                 first_dir_entry: None,
                 last_dir_entry: None,
+                chunk_type: None,
             };
             result.put_metadata(ROOT_NODE, metadata);
         }
@@ -276,7 +278,7 @@ impl<M: Memory> StableStorage<M> {
         assert!(len <= self.v2_allocator.chunk_size());
 
         let chunk_ptr = if self.last_index.0 == node && self.last_index.1 == index {
-            self.last_index.2
+            self.last_index.2 as FileSize
         } else if let Some(ptr) = self.v2_chunk_ptr.get(&(node, index)) {
             ptr
         } else {
@@ -298,6 +300,8 @@ impl<M: Memory> StableStorage<M> {
 
         self.last_index = (node, index, chunk_ptr);
 
+        grow_memory(&self.v2_chunks, chunk_ptr + offset + buf.len() as FileSize);
+
         self.v2_chunks.write(chunk_ptr + offset, buf);
     }
 
@@ -309,6 +313,8 @@ impl<M: Memory> StableStorage<M> {
         buf: &mut [u8],
     ) -> Result<FileSize, Error> {
         let start_index = (offset / FILE_CHUNK_SIZE_V1 as FileSize) as FileChunkIndex;
+        let end_index = ((offset + buf.len() as FileSize) / FILE_CHUNK_SIZE_V1 as FileSize + 1)
+            as FileChunkIndex;
 
         let mut chunk_offset = offset - start_index as FileSize * FILE_CHUNK_SIZE_V1 as FileSize;
 
@@ -317,28 +323,48 @@ impl<M: Memory> StableStorage<M> {
         let mut size_read: FileSize = 0;
         let mut remainder = file_size - offset;
 
-        for ((nd, _idx), value) in self.filechunk.range(range) {
-            assert!(nd == node);
+        let mut iter = self.filechunk.range(range);
+        let mut cur_fetched = None;
+
+        for cur_index in start_index..end_index {
+            let chunk_space = FILE_CHUNK_SIZE_V1 as FileSize - chunk_offset;
+
+            let to_read = remainder
+                .min(chunk_space)
+                .min(buf.len() as FileSize - size_read);
 
             // finished reading, buffer full
             if size_read == buf.len() as FileSize {
                 break;
             }
 
-            let chunk_space = FILE_CHUNK_SIZE_V1 as FileSize - chunk_offset;
+            if cur_fetched.is_none() {
+                cur_fetched = iter.next();
+            }
 
-            let to_read = remainder
-                .min(chunk_space)
-                .min(buf.len() as FileSize - size_read);
+            let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];
 
-            let write_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];
+            if let Some(((nd, idx), ref value)) = cur_fetched {
+                if idx == cur_index {
+                    assert!(nd == node);
 
-            write_buf.copy_from_slice(
-                &value.bytes[chunk_offset as usize..chunk_offset as usize + to_read as usize],
-            );
+                    read_buf.copy_from_slice(
+                        &value.bytes
+                            [chunk_offset as usize..chunk_offset as usize + to_read as usize],
+                    );
 
-            chunk_offset = 0;
+                    // consume token
+                    cur_fetched = None;
+                } else {
+                    // fill up with zeroes
+                    read_buf.iter_mut().for_each(|m| *m = 0)
+                }
+            } else {
+                // fill up with zeroes
+                read_buf.iter_mut().for_each(|m| *m = 0)
+            }
 
+            chunk_offset = 0;
             size_read += to_read;
             remainder -= to_read;
         }
@@ -353,54 +379,35 @@ impl<M: Memory> StableStorage<M> {
         file_size: FileSize,
         buf: &mut [u8],
     ) -> Result<FileSize, Error> {
-        // compute remainder to read
-        let mut remainder = file_size - offset;
-        // early exit if nothing left to read
-        if remainder == 0 {
+        if offset >= file_size {
             return Ok(0 as FileSize);
         }
 
+        // compute remainder to read
+        let mut remainder = file_size - offset;
+
         let chunk_size = self.chunk_size();
 
         let start_index = (offset / chunk_size as FileSize) as FileChunkIndex;
-        let end_index =
-            ((offset + buf.len() as FileSize) / chunk_size as FileSize + 1) as FileChunkIndex;
 
         let mut chunk_offset = offset - start_index as FileSize * chunk_size as FileSize;
 
-        let mut range = (node, start_index)..(node, end_index);
+        //let end_index = ((offset + buf.len() as FileSize) / chunk_size as FileSize + 1) as FileChunkIndex;
+        //let mut range = (node, start_index)..(node, end_index);
 
         let mut size_read: FileSize = 0;
 
-        if self.last_index.0 == node && self.last_index.1 == start_index {
-            //
-            let chunk_ptr = self.last_index.2;
-
-            let chunk_space = chunk_size as FileSize - chunk_offset;
-
-            let to_read = remainder
-                .min(chunk_space)
-                .min(buf.len() as FileSize - size_read);
-
-            let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];
-
-            self.v2_chunks.read(chunk_ptr + chunk_offset, read_buf);
-
-            chunk_offset = 0;
-
-            size_read += to_read;
-            remainder -= to_read;
-
-            range = (node, start_index + 1)..(node, end_index);
-        }
-
-        // early exit, if managed to completely read from the cached ptr
-        if size_read == buf.len() as FileSize {
-            return Ok(size_read);
-        }
+        let read_iter = ChunkV2Iterator::new(
+            node,
+            offset,
+            file_size,
+            self.chunk_size() as FileSize,
+            self.last_index,
+            &mut self.v2_chunk_ptr,
+        );
 
-        for ((nd, idx), chunk_ptr) in self.v2_chunk_ptr.range(range) {
+        for ((nd, idx), chunk_ptr) in read_iter {
             assert!(nd == node);
 
             // finished reading, buffer full
@@ -416,14 +423,17 @@ impl<M: Memory> StableStorage<M> {
 
             let read_buf = &mut buf[size_read as usize..size_read as usize + to_read as usize];
 
-            self.v2_chunks.read(chunk_ptr + chunk_offset, read_buf);
+            if let Some(cptr) = chunk_ptr {
+                self.v2_chunks.read(cptr + chunk_offset, read_buf);
+                self.last_index = (node, idx, cptr);
+            } else {
+                // fill read buffer with 0
+                read_buf.iter_mut().for_each(|m| *m = 0)
+            }
 
             chunk_offset = 0;
-
             size_read += to_read;
             remainder -= to_read;
-
-            self.last_index = (node, idx, chunk_ptr);
         }
 
         Ok(size_read)
     }
@@ -555,7 +565,7 @@ impl<M: Memory> Storage for StableStorage<M> {
         Ok(size_read)
     }
 
-    // Write file at the current file cursor, the cursor position will NOT be updated after reading.
+    // Write file at the current file cursor, the cursor position will NOT be updated after writing.
     fn write(&mut self, node: Node, offset: FileSize, buf: &[u8]) -> Result<FileSize, Error> {
         let mut metadata = self.get_metadata(node)?;
 
@@ -567,19 +577,23 @@ impl<M: Memory> Storage for StableStorage<M> {
         let end = offset + buf.len() as FileSize;
         let mut written_size = 0;
 
-        // decide if we use v2, first based on configuration, second: based on actual content
-        let mut use_v2 = self.chunk_type == ChunkType::V2;
-
-        if metadata.size > 0 {
-            // try to find any v2 node, othersize use v1
-
-            // TODO: make this work with the iterator to support sparse files
-            let ptr = self.v2_chunk_ptr.get(&(node, 0));
-
-            if ptr.is_none() {
-                use_v2 = false;
+        // decide if we use v2 chunks for writing
+        let use_v2 = match metadata.chunk_type {
+            Some(ChunkType::V2) => true,
+            Some(ChunkType::V1) => false,
+
+            // try to figure out which chunk type to use
+            None => {
+                if metadata.size > 0 {
+                    // try to find any v2 node, otherwise use v1
+                    let ptr = self.v2_chunk_ptr.range((node, 0)..(node + 1, 0)).next();
+
+                    ptr.is_some()
+                } else {
+                    self.chunk_type() == ChunkType::V2
+                }
             }
-        }
+        };
 
         if use_v2 {
             let chunk_infos = get_chunk_infos(offset, end, self.chunk_size());
@@ -645,6 +659,7 @@ impl<M: Memory> Storage for StableStorage<M> {
         for (k, _v) in self.v2_chunk_ptr.range(range) {
             chunks.push(k);
         }
+
         for (nd, idx) in chunks.into_iter() {
             assert!(nd == node);
             let removed = self.v2_chunk_ptr.remove(&(node, idx));
@@ -829,6 +844,7 @@ mod tests {
                 times: Times::default(),
                 first_dir_entry: Some(42),
                 last_dir_entry: Some(24),
+                chunk_type: Some(storage.chunk_type()),
             },
         );
         let metadata = storage.get_metadata(node).unwrap();
@@ -838,6 +854,7 @@ mod tests {
         assert_eq!(metadata.first_dir_entry, Some(42));
         assert_eq!(metadata.last_dir_entry, Some(24));
         storage.write(node, 0, &[42; 10]).unwrap();
+
         let mut buf = [0; 10];
         storage.read(node, 0, &mut buf).unwrap();
         assert_eq!(buf, [42; 10]);
diff --git a/src/storage/transient.rs b/src/storage/transient.rs
index 8ce494d..572cd16 100644
--- a/src/storage/transient.rs
+++ b/src/storage/transient.rs
@@ -50,6 +50,7 @@ impl TransientStorage {
             times: Times::default(),
             first_dir_entry: None,
             last_dir_entry: None,
+            chunk_type: None,
         };
         let mut result = Self {
             header: Header {
@@ -396,6 +397,7 @@ mod tests {
                 times: Times::default(),
                 first_dir_entry: None,
                 last_dir_entry: None,
+                chunk_type: Some(storage.chunk_type()),
             },
         );
         storage.write(node, 0, &[42; 10]).unwrap();
diff --git a/src/storage/types.rs b/src/storage/types.rs
index 7837162..3ac4625 100644
--- a/src/storage/types.rs
+++ b/src/storage/types.rs
@@ -1,4 +1,4 @@
-use crate::error::Error;
+use crate::{error::Error, fs::ChunkType};
 use ic_stable_structures::storable::Bound;
 use serde::{Deserialize, Serialize};
 
@@ -31,7 +31,7 @@ pub(crate) struct ChunkHandle {
 }
 
 // A file consists of multiple file chunks.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct FileChunk {
     pub bytes: [u8; FILE_CHUNK_SIZE_V1],
 }
@@ -91,6 +91,7 @@ pub struct Metadata {
     pub times: Times,
     pub first_dir_entry: Option<DirEntryIndex>,
     pub last_dir_entry: Option<DirEntryIndex>,
+    pub chunk_type: Option<ChunkType>,
 }
 
 impl ic_stable_structures::Storable for Metadata {
@@ -228,3 +229,124 @@ impl ic_stable_structures::Storable for DirEntry {
 
     const BOUND: ic_stable_structures::storable::Bound = Bound::Unbounded;
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::fs::ChunkType;
+
+    use super::{DirEntryIndex, FileSize, FileType, Node, Times};
+    use serde::{Deserialize, Serialize};
+
+    // Old node structure.
+    #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
+    pub struct MetadataOld {
+        pub node: Node,
+        pub file_type: FileType,
+        pub link_count: u64,
+        pub size: FileSize,
+        pub times: Times,
+        pub first_dir_entry: Option<DirEntryIndex>,
+        pub last_dir_entry: Option<DirEntryIndex>,
+    }
+
+    // New node structure.
+    #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
+    pub struct MetadataNew {
+        pub node: Node,
+        pub file_type: FileType,
+        pub link_count: u64,
+        pub size: FileSize,
+        pub times: Times,
+        pub first_dir_entry: Option<DirEntryIndex>,
+        pub last_dir_entry: Option<DirEntryIndex>,
+        pub chunk_type: Option<ChunkType>,
+    }
+
+    fn meta_to_bytes(meta: &MetadataOld) -> std::borrow::Cow<[u8]> {
+        let mut buf = vec![];
+        ciborium::ser::into_writer(meta, &mut buf).unwrap();
+        std::borrow::Cow::Owned(buf)
+    }
+
+    fn meta_from_bytes(bytes: std::borrow::Cow<[u8]>) -> MetadataNew {
+        ciborium::de::from_reader(bytes.as_ref()).unwrap()
+    }
+
+    #[test]
+    fn store_old_load_new() {
+        let meta_old = MetadataOld {
+            node: 23,
+            file_type: FileType::RegularFile,
+            link_count: 3,
+            size: 123,
+            times: Times::default(),
+            first_dir_entry: Some(23),
+            last_dir_entry: Some(35),
+        };
+
+        let bytes = meta_to_bytes(&meta_old);
+
+        let meta_new = meta_from_bytes(bytes);
+
+        assert_eq!(meta_new.node, meta_old.node);
+        assert_eq!(meta_new.file_type, meta_old.file_type);
+        assert_eq!(meta_new.link_count, meta_old.link_count);
+        assert_eq!(meta_new.size, meta_old.size);
+        assert_eq!(meta_new.times, meta_old.times);
+        assert_eq!(meta_new.first_dir_entry, meta_old.first_dir_entry);
+        assert_eq!(meta_new.last_dir_entry, meta_old.last_dir_entry);
+        assert_eq!(meta_new.chunk_type, None);
+    }
+
+    #[test]
+    fn store_old_load_new_both_none() {
+        let meta_old = MetadataOld {
+            node: 23,
+            file_type: FileType::RegularFile,
+            link_count: 3,
+            size: 123,
+            times: Times::default(),
+            first_dir_entry: None,
+            last_dir_entry: None,
+        };
+
+        let bytes = meta_to_bytes(&meta_old);
+
+        let meta_new = meta_from_bytes(bytes);
+
+        assert_eq!(meta_new.node, meta_old.node);
+        assert_eq!(meta_new.file_type, meta_old.file_type);
+        assert_eq!(meta_new.link_count, meta_old.link_count);
+        assert_eq!(meta_new.size, meta_old.size);
+        assert_eq!(meta_new.times, meta_old.times);
+        assert_eq!(meta_new.first_dir_entry, meta_old.first_dir_entry);
+        assert_eq!(meta_new.last_dir_entry, meta_old.last_dir_entry);
+        assert_eq!(meta_new.chunk_type, None);
+    }
+
+    #[test]
+    fn store_old_load_new_first_none() {
+        let meta_old = MetadataOld {
+            node: 23,
+            file_type: FileType::RegularFile,
+            link_count: 3,
+            size: 123,
+            times: Times::default(),
+            first_dir_entry: None,
+            last_dir_entry: Some(23),
+        };
+
+        let bytes = meta_to_bytes(&meta_old);
+
+        let meta_new = meta_from_bytes(bytes);
+
+        assert_eq!(meta_new.node, meta_old.node);
+        assert_eq!(meta_new.file_type, meta_old.file_type);
+        assert_eq!(meta_new.link_count, meta_old.link_count);
+        assert_eq!(meta_new.size, meta_old.size);
+        assert_eq!(meta_new.times, meta_old.times);
+        assert_eq!(meta_new.first_dir_entry, meta_old.first_dir_entry);
+        assert_eq!(meta_new.last_dir_entry, meta_old.last_dir_entry);
+        assert_eq!(meta_new.chunk_type, None);
+    }
+}
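
Note: the `// test sparse files` placeholder left in src/fs.rs above is not filled in by this patch. A minimal sketch of what such a check could look like at the storage level, reusing only items that already appear in the hunks above (StableStorage, new_vector_memory, Metadata, and the Storage trait methods); the offsets and byte values are illustrative only and not part of the commit:

    #[test]
    fn sparse_write_then_zero_filled_read() {
        // assumes the same imports as the iterator.rs tests above:
        // StableStorage, Storage, Metadata, FileType, Times, FileSize, new_vector_memory
        let mut storage = StableStorage::new(new_vector_memory());

        // a fresh regular file that records its chunk type, as create_dir_entry now does
        let node = storage.new_node();
        storage.put_metadata(
            node,
            Metadata {
                node,
                file_type: FileType::RegularFile,
                link_count: 1,
                size: 0,
                times: Times::default(),
                first_dir_entry: None,
                last_dir_entry: None,
                chunk_type: Some(storage.chunk_type()),
            },
        );

        // write a small tail two chunks past the start, leaving a hole before it
        let hole = (storage.chunk_size() * 2) as FileSize;
        storage.write(node, hole, &[42u8; 10]).unwrap();

        // reading across the hole should yield zeros for the unwritten chunks
        let mut buf = vec![7u8; (hole + 10) as usize];
        let read = storage.read(node, 0, &mut buf).unwrap();

        assert_eq!(read, hole + 10);
        assert!(buf[..hole as usize].iter().all(|b| *b == 0));
        assert!(buf[hole as usize..].iter().all(|b| *b == 42));
    }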