diff --git a/benchmark/src/main.rs b/benchmark/src/main.rs
index d24ec7d3..6e5c93f3 100644
--- a/benchmark/src/main.rs
+++ b/benchmark/src/main.rs
@@ -74,6 +74,9 @@
 async fn main() -> Result<(), Box<dyn Error>> {
     let mgrcfg = RevisionManagerConfig::builder()
         .node_cache_size(args.cache_size)
+        .free_list_cache_size(
+            NonZeroUsize::new(2 * args.batch_size as usize).expect("batch size > 0"),
+        )
         .max_revisions(args.revisions)
         .build();
     let cfg = DbConfig::builder()
diff --git a/firewood/src/manager.rs b/firewood/src/manager.rs
index 5c96d047..ca5c59ef 100644
--- a/firewood/src/manager.rs
+++ b/firewood/src/manager.rs
@@ -23,6 +23,9 @@
 pub struct RevisionManagerConfig {
     #[builder(default_code = "NonZero::new(20480).expect(\"non-zero\")")]
     node_cache_size: NonZero<usize>,
+
+    #[builder(default_code = "NonZero::new(10000).expect(\"non-zero\")")]
+    free_list_cache_size: NonZero<usize>,
 }
 
 type CommittedRevision = Arc<NodeStore<Committed, FileBacked>>;
@@ -62,7 +65,12 @@ impl RevisionManager {
         truncate: bool,
         config: RevisionManagerConfig,
     ) -> Result<Self, RevisionManagerError> {
-        let storage = Arc::new(FileBacked::new(filename, config.node_cache_size, truncate)?);
+        let storage = Arc::new(FileBacked::new(
+            filename,
+            config.node_cache_size,
+            config.free_list_cache_size,
+            truncate,
+        )?);
         let nodestore = match truncate {
             true => Arc::new(NodeStore::new_empty_committed(storage.clone())?),
             false => Arc::new(NodeStore::open(storage.clone())?),
diff --git a/storage/src/linear/filebacked.rs b/storage/src/linear/filebacked.rs
index 701a79b6..94573cae 100644
--- a/storage/src/linear/filebacked.rs
+++ b/storage/src/linear/filebacked.rs
@@ -28,6 +28,7 @@ use super::{ReadableStorage, WritableStorage};
 pub struct FileBacked {
     fd: Mutex<File>,
     cache: Mutex<LruCache<LinearAddress, Arc<Node>>>,
+    free_list_cache: Mutex<LruCache<LinearAddress, Option<LinearAddress>>>,
 }
 
 impl FileBacked {
@@ -35,6 +36,7 @@
     pub fn new(
         path: PathBuf,
         node_cache_size: NonZero<usize>,
+        free_list_cache_size: NonZero<usize>,
         truncate: bool,
     ) -> Result<Self, Error> {
         let fd = OpenOptions::new()
@@ -47,6 +49,7 @@
         Ok(Self {
             fd: Mutex::new(fd),
             cache: Mutex::new(LruCache::new(node_cache_size)),
+            free_list_cache: Mutex::new(LruCache::new(free_list_cache_size)),
         })
     }
 }
@@ -68,11 +71,15 @@ impl ReadableStorage for FileBacked {
     fn read_cached_node(&self, addr: LinearAddress) -> Option<Arc<Node>> {
         let mut guard = self.cache.lock().expect("poisoned lock");
         let cached = guard.get(&addr).cloned();
-        if cached.is_some() {
-            counter!("firewood.node.cache.hit").increment(1);
-        } else {
-            counter!("firewood.node.cache.miss").increment(1);
-        }
+        counter!("firewood.cache.node", "type" => if cached.is_some() { "hit" } else { "miss" })
+            .increment(1);
+        cached
+    }
+
+    fn free_list_cache(&self, addr: LinearAddress) -> Option<Option<LinearAddress>> {
+        let mut guard = self.free_list_cache.lock().expect("poisoned lock");
+        let cached = guard.pop(&addr);
+        counter!("firewood.cache.freelist", "type" => if cached.is_some() { "hit" } else { "miss" }).increment(1);
         cached
     }
 }
@@ -102,4 +109,9 @@ impl WritableStorage for FileBacked {
             guard.pop(addr);
         }
     }
+
+    fn add_to_free_list_cache(&self, addr: LinearAddress, next: Option<LinearAddress>) {
+        let mut guard = self.free_list_cache.lock().expect("poisoned lock");
+        guard.put(addr, next);
+    }
 }
diff --git a/storage/src/linear/mod.rs b/storage/src/linear/mod.rs
index 866cecd3..e60755b2 100644
--- a/storage/src/linear/mod.rs
+++ b/storage/src/linear/mod.rs
@@ -47,6 +47,11 @@ pub trait ReadableStorage: Debug + Sync + Send {
     fn read_cached_node(&self, _addr: LinearAddress) -> Option<Arc<Node>> {
         None
     }
+
+    /// Fetch the next pointer from the freelist cache
+    fn free_list_cache(&self, _addr: LinearAddress) -> Option<Option<LinearAddress>> {
+        None
+    }
 }
 
 /// Trait for writable storage.
@@ -73,4 +78,7 @@ pub trait WritableStorage: ReadableStorage {
 
     /// Invalidate all nodes that are part of a specific revision, as these will never be referenced again
     fn invalidate_cached_nodes<'a>(&self, _addresses: impl Iterator<Item = &'a LinearAddress>) {}
+
+    /// Add a new entry to the freelist cache
+    fn add_to_free_list_cache(&self, _addr: LinearAddress, _next: Option<LinearAddress>) {}
 }
diff --git a/storage/src/nodestore.rs b/storage/src/nodestore.rs
index b0d4dec8..ad7b467b 100644
--- a/storage/src/nodestore.rs
+++ b/storage/src/nodestore.rs
@@ -379,13 +379,17 @@ impl<S: ReadableStorage> NodeStore<Arc<ImmutableProposal>, S> {
         if let Some(free_stored_area_addr) = self.header.free_lists[index] {
             // Update the free list head.
             // Skip the index byte and Area discriminant byte
-            let free_area_addr = free_stored_area_addr.get() + 2;
-            let free_head_stream = self.storage.stream_from(free_area_addr)?;
-            let free_head: FreeArea = bincode::deserialize_from(free_head_stream)
-                .map_err(|e| Error::new(ErrorKind::InvalidData, e))?;
+            if let Some(free_head) = self.storage.free_list_cache(free_stored_area_addr) {
+                self.header.free_lists[index] = free_head;
+            } else {
+                let free_area_addr = free_stored_area_addr.get() + 2;
+                let free_head_stream = self.storage.stream_from(free_area_addr)?;
+                let free_head: FreeArea = bincode::deserialize_from(free_head_stream)
+                    .map_err(|e| Error::new(ErrorKind::InvalidData, e))?;
 
-            // Update the free list to point to the next free block.
-            self.header.free_lists[index] = free_head.next_free_block;
+                // Update the free list to point to the next free block.
+                self.header.free_lists[index] = free_head.next_free_block;
+            }
 
             // Return the address of the newly allocated block.
             return Ok(Some((free_stored_area_addr, index as AreaIndex)));
@@ -462,6 +466,9 @@ impl<S: WritableStorage> NodeStore<Committed, S> {
 
         self.storage.write(addr.into(), &stored_area_bytes)?;
 
+        self.storage
+            .add_to_free_list_cache(addr, self.header.free_lists[area_size_index as usize]);
+
         // The newly freed block is now the head of the free list.
         self.header.free_lists[area_size_index as usize] = Some(addr);