ensure that chunk size is used when writing
Ensure atomicity at the file level by making sure all of the file meta
fits within one chunk.
lulf committed Apr 26, 2024
1 parent 308827a commit 07f6b8f
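
The assert added in FileManager::new below enforces the invariant the commit message describes: all file metadata must fit within a single chunk, so that committing that one chunk publishes the entire meta update atomically. A minimal sketch of the size check, with made-up numbers (FILE_COUNT, FILE_META_SIZE, and MAX_CHUNK_SIZE here are illustrative assumptions, not the crate's real values):

// Sketch only: hypothetical sizes standing in for the crate's config.
const FILE_COUNT: usize = 64;
const FILE_META_SIZE: usize = 12;
const MAX_CHUNK_SIZE: usize = 1024;

fn main() {
    // If every file's metadata fits in one chunk, committing that single
    // chunk makes the whole meta update visible at once.
    assert!(FILE_COUNT * FILE_META_SIZE <= MAX_CHUNK_SIZE);
    println!(
        "{} bytes of file meta fit in a {}-byte chunk",
        FILE_COUNT * FILE_META_SIZE,
        MAX_CHUNK_SIZE
    );
}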
Showing 2 changed files with 17 additions and 8 deletions.
12 changes: 8 additions & 4 deletions src/file.rs
@@ -10,7 +10,8 @@
 pub use crate::page::ReadError;
 use crate::page::{ChunkHeader, DehydratedPageReader, Header, PageHeader, PageReader, PageWriter};
 use crate::types::{OptionPageID, PageID};
 
-pub const PAGE_MAX_PAYLOAD_SIZE: usize = PAGE_SIZE - PageHeader::SIZE - size_of::<DataHeader>() - ChunkHeader::SIZE;
+pub const PAGE_MAX_PAYLOAD_SIZE: usize =
+    PAGE_SIZE - PageHeader::SIZE - size_of::<DataHeader>() - (page::CHUNKS_PER_PAGE * ChunkHeader::SIZE);
 
 pub type FileID = u8;

@@ -93,6 +94,7 @@ pub struct FileManager<F: Flash> {
 
 impl<F: Flash> FileManager<F> {
     pub fn new(flash: F, random_seed: u32) -> Self {
+        assert!(FILE_COUNT * FileMeta::SIZE <= page::MAX_CHUNK_SIZE);
         Self {
             flash,
             random: random_seed,
@@ -1378,9 +1380,7 @@ impl FileWriter {
 
         trace!(
             "flush_header: page={:?} h={:?} record_boundary={:?}",
-            page_id,
-            header,
-            self.record_boundary
+            page_id, header, self.record_boundary
         );
 
         self.seq = next_seq;
@@ -1414,6 +1414,10 @@ impl FileWriter {
                 }
                 Some(w) => {
                     let n = w.write(&mut m.flash, data).await?;
+                    if w.is_chunk_full() {
+                        // Commit when we wrote a whole chunk
+                        w.commit(&mut m.flash).await?;
+                    }
                     data = &data[n..];
                     if n == 0 {
                         self.next_page(m).await?;
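
The write loop above now commits each chunk the moment the writer reports it full, so data never straddles an open, uncommitted chunk. A standalone sketch of that loop shape (Writer is a hypothetical stand-in, not the crate's PageWriter, and MAX_CHUNK_SIZE is an assumed value):

// Sketch of the new write-loop shape under assumed types and sizes.
const MAX_CHUNK_SIZE: usize = 1024;

struct Writer {
    chunk_pos: usize,
}

impl Writer {
    fn write(&mut self, data: &[u8]) -> usize {
        // Accept at most what is left in the current chunk.
        let n = data.len().min(MAX_CHUNK_SIZE - self.chunk_pos);
        self.chunk_pos += n;
        n
    }
    fn is_chunk_full(&self) -> bool {
        self.chunk_pos == MAX_CHUNK_SIZE
    }
    fn commit(&mut self) {
        // Sealing the chunk header would happen here; start a fresh chunk.
        self.chunk_pos = 0;
    }
}

fn write_all(w: &mut Writer, mut data: &[u8]) {
    while !data.is_empty() {
        let n = w.write(data);
        if w.is_chunk_full() {
            // Commit when we wrote a whole chunk, mirroring the diff above.
            w.commit();
        }
        data = &data[n..];
    }
}

fn main() {
    let mut w = Writer { chunk_pos: 0 };
    write_all(&mut w, &[0u8; 3000]); // spans two full-chunk commits plus a tail
}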
13 changes: 9 additions & 4 deletions src/page.rs
@@ -53,11 +53,12 @@ pub unsafe trait Header: Sized {
     const MAGIC: u32;
 }
 
-const MAX_CHUNK_SIZE: usize = if config::MAX_CHUNK_SIZE > (PAGE_SIZE - PageHeader::SIZE - ChunkHeader::SIZE) {
+pub(crate) const MAX_CHUNK_SIZE: usize = if config::MAX_CHUNK_SIZE > (PAGE_SIZE - PageHeader::SIZE - ChunkHeader::SIZE) {
     PAGE_SIZE - PageHeader::SIZE - ChunkHeader::SIZE
 } else {
     config::MAX_CHUNK_SIZE
 };
+pub(crate) const CHUNKS_PER_PAGE: usize = (PAGE_SIZE - PageHeader::SIZE) / (MAX_CHUNK_SIZE + ChunkHeader::SIZE);
 
 async fn write_header<F: Flash, H: Header>(flash: &mut F, page_id: PageID, header: H) -> Result<(), F::Error> {
     assert!(size_of::<H>() <= MAX_HEADER_SIZE);
@@ -433,8 +434,12 @@ impl<H: Header> PageWriter<H> {
         self.page_id
     }
 
+    pub fn is_chunk_full(&self) -> bool {
+        self.chunk_pos == MAX_CHUNK_SIZE
+    }
+
     pub async fn write<F: Flash>(&mut self, flash: &mut F, data: &[u8]) -> Result<usize, Error<F::Error>> {
-        let max_write = PAGE_SIZE.saturating_sub(self.chunk_offset + ChunkHeader::SIZE + self.chunk_pos).min(MAX_CHUNK_SIZE);
+        let max_write = PAGE_SIZE.saturating_sub(self.chunk_offset + (ChunkHeader::SIZE * CHUNKS_PER_PAGE) + self.chunk_pos).min(MAX_CHUNK_SIZE);
         let total_n = data.len().min(max_write);
         if total_n == 0 {
             return Ok(0);
@@ -513,7 +518,6 @@ impl<H: Header> PageWriter<H> {
             // nothing to commit.
             return Ok(());
         }
-
         self.erase_if_needed(flash).await.map_err(Error::Flash)?;
 
         // flush align buf.
@@ -535,6 +539,7 @@
             #[cfg(feature = "crc")]
             crc: self.crc.finish(),
         };
+
         flash
             .write(self.page_id as _, self.chunk_offset, &h.to_bytes())
             .await
@@ -625,7 +630,7 @@ mod tests {
     }
 
     const HEADER: TestHeader = TestHeader { foo: 123456 };
-    const MAX_PAYLOAD: usize = PAGE_SIZE - PageHeader::SIZE - size_of::<TestHeader>() - ChunkHeader::SIZE;
+    const MAX_PAYLOAD: usize = PAGE_SIZE - PageHeader::SIZE - size_of::<TestHeader>() - (ChunkHeader::SIZE * CHUNKS_PER_PAGE);
 
     #[test_log::test]
     fn test_crc32() {
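
The src/page.rs changes above switch the payload accounting from reserving one chunk header per page to reserving one header per chunk. A worked example of that arithmetic, under assumed sizes (PAGE_SIZE = 4096, PageHeader::SIZE = 16, ChunkHeader::SIZE = 8, and config::MAX_CHUNK_SIZE = 1024 are illustrative, not the crate's actual configuration):

// Worked example of the page-layout arithmetic with assumed sizes.
const PAGE_SIZE: usize = 4096;
const PAGE_HEADER_SIZE: usize = 16;
const CHUNK_HEADER_SIZE: usize = 8;
const MAX_CHUNK_SIZE: usize = 1024;

// Each chunk costs its payload plus one header, so a page holds:
const CHUNKS_PER_PAGE: usize = (PAGE_SIZE - PAGE_HEADER_SIZE) / (MAX_CHUNK_SIZE + CHUNK_HEADER_SIZE);

fn main() {
    // (4096 - 16) / (1024 + 8) = 3 chunks per page under these assumptions,
    // so payload bounds must reserve 3 chunk headers, not 1.
    assert_eq!(CHUNKS_PER_PAGE, 3);
    let max_payload = PAGE_SIZE - PAGE_HEADER_SIZE - CHUNKS_PER_PAGE * CHUNK_HEADER_SIZE;
    println!("chunks per page = {CHUNKS_PER_PAGE}, max payload = {max_payload}");
}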
