Skip to content

Commit

Permalink
add method to File to get extents on-disk
Browse files Browse the repository at this point in the history
  • Loading branch information
agrif authored and rafalh committed May 29, 2022
1 parent 2d6bca2 commit 4892fb1
Showing 1 changed file with 44 additions and 0 deletions.
44 changes: 44 additions & 0 deletions src/file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,17 @@ pub struct File<'a, IO: ReadWriteSeek, TP, OCC> {
fs: &'a FileSystem<IO, TP, OCC>,
}

/// An extent containing a file's data on disk.
///
/// This is created by the `extents` method on `File`, and represents
/// a byte range on the disk that contains a file's data. All values
/// are in bytes.
// Plain-old-data of two integers: derive `Copy` plus comparison/hash
// traits so callers can freely duplicate, compare, and key on extents.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Extent {
    /// Offset of this extent from the beginning of the disk, in bytes.
    pub offset: u64,
    /// Length of this extent, in bytes.
    pub size: u32,
}

impl<'a, IO: ReadWriteSeek, TP, OCC> File<'a, IO, TP, OCC> {
pub(crate) fn new(
first_cluster: Option<u32>,
Expand Down Expand Up @@ -74,6 +85,39 @@ impl<'a, IO: ReadWriteSeek, TP, OCC> File<'a, IO, TP, OCC> {
}
}

/// Get the extents of a file on disk.
///
/// This returns an iterator over the byte ranges on-disk occupied by
/// this file.
pub fn extents(&mut self) -> impl Iterator<Item=Result<Extent, Error<IO::Error>>> + 'a {

let fs = self.fs;
let cluster_size = fs.cluster_size();
let mut bytes_left = match self.size() {
Some(s) => s,
None => return None.into_iter().flatten(),
};
let first = match self.first_cluster {
Some(f) => f,
None => return None.into_iter().flatten(),
};

Some(core::iter::once(Ok(first)).chain(fs.cluster_iter(first))
.map(move |cluster_err| {
match cluster_err {
Ok(cluster) => {
let size = cluster_size.min(bytes_left);
bytes_left -= size;
Ok(Extent {
offset: fs.offset_from_cluster(cluster),
size: size,
})
},
Err(e) => Err(e),
}
})).into_iter().flatten()
}

pub(crate) fn abs_pos(&self) -> Option<u64> {
// Returns current position relative to filesystem start
// Note: when between clusters it returns position after previous cluster
Expand Down

0 comments on commit 4892fb1

Please sign in to comment.