From da3f2c82a3b3ef173880671dfbda83adddb4c30a Mon Sep 17 00:00:00 2001
From: Eval EXEC
Date: Sun, 14 Sep 2025 17:24:10 +0800
Subject: [PATCH 1/3] Apply cargo clippy --fix --all

---
 src/db.rs             |  2 +-
 src/heap.rs           |  5 ++---
 src/metadata_store.rs |  5 ++---
 src/object_cache.rs   |  7 +++----
 src/tree.rs           | 25 ++++++++++---------------
 5 files changed, 18 insertions(+), 26 deletions(-)

diff --git a/src/db.rs b/src/db.rs
index 22e0f5463..40f5edf89 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -580,7 +580,7 @@ impl Db {
         );
 
         self.collection_name_mapping
-            .insert(name_ref, &collection_id.0.to_le_bytes())?;
+            .insert(name_ref, collection_id.0.to_le_bytes())?;
 
         trees.insert(collection_id, tree.clone());
 
diff --git a/src/heap.rs b/src/heap.rs
index 38926540b..0e545fd73 100644
--- a/src/heap.rs
+++ b/src/heap.rs
@@ -726,13 +726,12 @@ impl Heap {
         // initialize directories if not present
         let mut was_recovered = true;
         for p in [path, &slabs_dir] {
-            if let Err(e) = fs::read_dir(p) {
-                if e.kind() == io::ErrorKind::NotFound {
+            if let Err(e) = fs::read_dir(p)
+                && e.kind() == io::ErrorKind::NotFound {
                     fallible!(fs::create_dir_all(p));
                     was_recovered = false;
                     continue;
                 }
-            }
 
             maybe!(fs::File::open(p).and_then(|f| f.sync_all()))?;
         }
diff --git a/src/metadata_store.rs b/src/metadata_store.rs
index 4e265fb36..e17e8ae6f 100644
--- a/src/metadata_store.rs
+++ b/src/metadata_store.rs
@@ -284,11 +284,10 @@ impl MetadataStore {
         let path = storage_directory.as_ref();
 
         // initialize directories if not present
-        if let Err(e) = fs::read_dir(path) {
-            if e.kind() == io::ErrorKind::NotFound {
+        if let Err(e) = fs::read_dir(path)
+            && e.kind() == io::ErrorKind::NotFound {
                 fallible!(fs::create_dir_all(path));
             }
-        }
 
         let _ = fs::File::create(path.join(WARN));
 
diff --git a/src/object_cache.rs b/src/object_cache.rs
index ea6a31517..5f696a8b6 100644
--- a/src/object_cache.rs
+++ b/src/object_cache.rs
@@ -355,7 +355,7 @@ impl ObjectCache {
         self.flush_epoch.current_flush_epoch()
     }
 
-    pub fn check_into_flush_epoch(&self) -> FlushEpochGuard {
+    pub fn check_into_flush_epoch(&self) -> FlushEpochGuard<'_> {
         self.flush_epoch.check_in()
     }
 
@@ -761,13 +761,12 @@ impl ObjectCache {
                 continue;
             };
 
-            if let Some(ref inner) = object.inner.read().leaf {
-                if let Some(dirty) = inner.dirty_flush_epoch {
+            if let Some(ref inner) = object.inner.read().leaf
+                && let Some(dirty) = inner.dirty_flush_epoch {
                     assert!(dirty > flush_through_epoch);
                     // This object will be rewritten anyway when its dirty epoch gets flushed
                     continue;
                 }
-            }
 
             let data = match self.read(fragmented_object_id) {
                 Some(Ok(data)) => data,
diff --git a/src/tree.rs b/src/tree.rs
index f7530c57d..01b86ecec 100644
--- a/src/tree.rs
+++ b/src/tree.rs
@@ -358,8 +358,8 @@ impl Tree {
                     continue;
                 }
 
-                if let Some(ref hi) = leaf.hi {
-                    if &**hi <= key {
+                if let Some(ref hi) = leaf.hi
+                    && &**hi <= key {
                         let size = leaf.in_memory_size;
                         log::trace!(
                             "key overshoot in page_in - search key {:?}, node hi {:?}",
@@ -379,7 +379,6 @@ impl Tree {
 
                         continue;
                     }
-                }
                 return Ok((low_key, write, node));
             }
         }
@@ -660,15 +659,14 @@ impl Tree {
                 continue;
             }
 
-            if let Some(ref hi) = leaf.hi {
-                if &**hi <= key {
+            if let Some(ref hi) = leaf.hi
+                && &**hi <= key {
                     log::trace!("key overshoot on leaf_for_key");
                     // cache maintenance occurs in Drop for LeafReadGuard
                     drop(leaf_guard);
                     hint::spin_loop();
                     continue;
                 }
-            }
 
             if leaf.lo != node.low_key {
                 // TODO determine why this rare situation occurs and better
@@ -719,8 +717,8 @@
             assert_ne!(max_unflushed_epoch, flush_epoch_guard.epoch());
         }
 
-        if let Some(old_dirty_epoch) = leaf.dirty_flush_epoch {
-            if old_dirty_epoch != flush_epoch_guard.epoch() {
+        if let Some(old_dirty_epoch) = leaf.dirty_flush_epoch
+            && old_dirty_epoch != flush_epoch_guard.epoch() {
                 assert!(old_dirty_epoch < flush_epoch_guard.epoch());
 
                 log::trace!(
@@ -732,7 +730,6 @@ impl Tree {
 
             self.cooperatively_serialize_leaf(node.object_id, &mut *leaf);
         }
-    }
 
     Ok(LeafWriteGuard {
         flush_epoch_guard,
@@ -1396,12 +1393,11 @@ impl Tree {
         if let Some((_lo, w, _id)) = &last {
             let leaf = w.leaf.as_ref().unwrap();
             assert!(&leaf.lo <= key);
-            if let Some(hi) = &leaf.hi {
-                if hi <= key {
+            if let Some(hi) = &leaf.hi
+                && hi <= key {
                     let (lo, w, n) = last.take().unwrap();
                     acquired_locks.insert(lo, (w, n));
                 }
-            }
         }
         if last.is_none() {
             // TODO evaluate whether this is correct, as page_in performs
@@ -2056,13 +2052,12 @@ impl Iterator for Iter {
 
             let leaf = node.leaf_read.leaf.as_ref().unwrap();
 
-            if let Some(leaf_hi) = &leaf.hi {
-                if leaf_hi <= &search_key {
+            if let Some(leaf_hi) = &leaf.hi
+                && leaf_hi <= &search_key {
                     // concurrent merge, retry
                     log::trace!("undershot in interator, retrying search");
                     continue;
                 }
-            }
 
             if leaf.lo > search_key {
                 // concurrent successor split, retry
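The dominant rewrite in the patch above collapses a nested `if` inside an `if let` into a single let-chain, which clippy can now do mechanically because let-chains stabilized in Rust 1.88 and require the 2024 edition. A minimal, self-contained sketch of the before/after shape — `overshoot`, `hi`, and `key` are illustrative stand-ins, not sled's actual types:

// Requires edition 2024 (let-chains, stable since Rust 1.88).
fn overshoot(hi: Option<&[u8]>, key: &[u8]) -> bool {
    // The pre-fix shape that clippy collapses:
    // if let Some(h) = hi {
    //     if h <= key {
    //         return true;
    //     }
    // }

    // The post-fix shape: binding and test chained with `&&`,
    // short-circuiting left to right like an ordinary `&&`.
    if let Some(h) = hi
        && h <= key
    {
        return true;
    }
    false
}

fn main() {
    assert!(overshoot(Some(&b"b"[..]), b"c"));
    assert!(!overshoot(None, b"c"));
    assert!(!overshoot(Some(&b"d"[..]), b"c"));
}

The payoff is one less nesting level per check, which is exactly what the heap.rs, metadata_store.rs, object_cache.rs, and tree.rs hunks above buy.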
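The other change in the patch, `FlushEpochGuard` → `FlushEpochGuard<'_>`, spells out an elided lifetime in a return type (the cleanup that lints such as `elided_lifetimes_in_paths` ask for): the guard borrows from the receiver, and `<'_>` makes that borrow visible in the signature. A sketch under assumed, simplified types — `Epoch` and `Guard` here only model the shape of sled's flush-epoch guard:

struct Guard<'a> {
    counter: &'a mut u64,
}

struct Epoch {
    counter: u64,
}

impl Epoch {
    // Writing `-> Guard` would compile identically, but hides that the
    // returned value keeps `self` borrowed. `Guard<'_>` says so explicitly.
    fn check_in(&mut self) -> Guard<'_> {
        Guard { counter: &mut self.counter }
    }
}

fn main() {
    let mut epoch = Epoch { counter: 0 };
    let guard = epoch.check_in();
    *guard.counter += 1; // `epoch` stays borrowed until `guard`'s last use
    assert_eq!(epoch.counter, 1);
}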
From 37a9561bdaa6a1fb44202bb38a4ed4463d778fbf Mon Sep 17 00:00:00 2001
From: Eval EXEC
Date: Sun, 14 Sep 2025 17:27:57 +0800
Subject: [PATCH 2/3] Fix cargo clippy errors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

❯ cargo clippy --all
warning: `panic` setting is ignored for `test` profile
    Checking sled v1.0.0-alpha.124 (/home/exec/Projects/github.com/spacejam/sled)
error: calling `set_len()` immediately after reserving a buffer creates uninitialized values
   --> src/heap.rs:516:9
    |
516 |         let mut data = Vec::with_capacity(self.slot_size);
    |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
517 |         unsafe {
518 |             data.set_len(self.slot_size);
    |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    |
    = help: initialize the buffer or wrap the content in `MaybeUninit`
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninit_vec
    = note: `#[deny(clippy::uninit_vec)]` on by default

error: calling `set_len()` immediately after reserving a buffer creates uninitialized values
   --> src/metadata_store.rs:526:5
    |
526 |     reusable_frame_buffer.reserve(len + 12);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
527 |     unsafe {
528 |         reusable_frame_buffer.set_len(len + 12);
    |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    |
    = help: initialize the buffer or wrap the content in `MaybeUninit`
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninit_vec

error: calling `set_len()` immediately after reserving a buffer creates uninitialized values
   --> src/metadata_store.rs:592:9
    |
592 |         low_key_buf.reserve(low_key_len);
    |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
593 |         unsafe {
594 |             low_key_buf.set_len(low_key_len);
    |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    |
    = help: initialize the buffer or wrap the content in `MaybeUninit`
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninit_vec

error: could not compile `sled` (lib) due to 3 previous errors
---
 src/heap.rs           |  5 +----
 src/metadata_store.rs | 10 ++--------
 2 files changed, 3 insertions(+), 12 deletions(-)

diff --git a/src/heap.rs b/src/heap.rs
index 0e545fd73..24cd18c99 100644
--- a/src/heap.rs
+++ b/src/heap.rs
@@ -513,10 +513,7 @@ impl Slab {
     ) -> io::Result<Vec<u8>> {
         log::trace!("reading from slot {} in slab {}", slot, self.slot_size);
 
-        let mut data = Vec::with_capacity(self.slot_size);
-        unsafe {
-            data.set_len(self.slot_size);
-        }
+        let mut data = vec![0u8; self.slot_size];
 
         let whence = self.slot_size as u64 * slot;
 
diff --git a/src/metadata_store.rs b/src/metadata_store.rs
index e17e8ae6f..85e5aa470 100644
--- a/src/metadata_store.rs
+++ b/src/metadata_store.rs
@@ -523,10 +523,7 @@ fn read_frame(
     let len: usize = usize::try_from(len_u64).unwrap();
 
     reusable_frame_buffer.clear();
-    reusable_frame_buffer.reserve(len + 12);
-    unsafe {
-        reusable_frame_buffer.set_len(len + 12);
-    }
+    reusable_frame_buffer.resize(len + 12, 0);
 
     reusable_frame_buffer[..8].copy_from_slice(&frame_size_with_crc_buf);
     fallible!(file.read_exact(&mut reusable_frame_buffer[8..]));
@@ -589,10 +586,7 @@ fn read_frame(
     let low_key_len_raw = u64::from_le_bytes(low_key_len_buf);
     let low_key_len = usize::try_from(low_key_len_raw).unwrap();
 
-    low_key_buf.reserve(low_key_len);
-    unsafe {
-        low_key_buf.set_len(low_key_len);
-    }
+    low_key_buf.resize(low_key_len, 0);
 
     decoder
         .read_exact(&mut low_key_buf)
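Both hunks above retire the `Vec::with_capacity` + `set_len` idiom that the quoted `clippy::uninit_vec` errors reject (deny-by-default, because the resulting bytes are uninitialized memory and reading them is undefined behavior) in favor of zero-initialized buffers: `vec![0u8; n]` for a fresh allocation, `resize` for a buffer reused across calls. A minimal sketch of both shapes; `read_into` and `read_reusing` are hypothetical helpers, not sled code:

use std::io::{self, Read};

// Fresh buffer per call, like Slab::read after this patch.
fn read_into(src: &mut impl Read, len: usize) -> io::Result<Vec<u8>> {
    // let mut data = Vec::with_capacity(len);
    // unsafe { data.set_len(len); }  // the shape clippy rejects above
    let mut data = vec![0u8; len]; // zero-filled, then overwritten
    src.read_exact(&mut data)?;
    Ok(data)
}

// Reused buffer, like reusable_frame_buffer / low_key_buf: resize keeps
// the existing allocation whenever the capacity already suffices.
fn read_reusing(
    src: &mut impl Read,
    buf: &mut Vec<u8>,
    len: usize,
) -> io::Result<()> {
    buf.clear();
    buf.resize(len, 0);
    src.read_exact(buf)
}

fn main() -> io::Result<()> {
    let mut src = &b"hello world"[..];
    assert_eq!(read_into(&mut src, 6)?, b"hello ");
    let mut buf = Vec::new();
    read_reusing(&mut src, &mut buf, 5)?;
    assert_eq!(buf, b"world");
    Ok(())
}

The zeroing cost is usually noise: fresh zeroed allocations can be served from pre-zeroed pages, and the reused buffer trades a memset for skipping reallocation.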
From c03cd73fdf66b6f282589710a82f2f7a5d259928 Mon Sep 17 00:00:00 2001
From: Eval EXEC
Date: Sun, 14 Sep 2025 17:30:52 +0800
Subject: [PATCH 3/3] Execute `cargo fmt --all`

---
 examples/bench.rs                            |   2 +-
 src/config.rs                                |   3 +-
 src/db.rs                                    |   4 +-
 src/event_verifier.rs                        |   4 +-
 src/heap.rs                                  |  17 ++-
 src/id_allocator.rs                          |  11 +-
 src/leaf.rs                                  |   3 +-
 src/lib.rs                                   |  27 ++++----
 src/metadata_store.rs                        |  31 +++--
 src/object_cache.rs                          |  26 ++--
 src/object_location_mapper.rs                |  13 +-
 src/tree.rs                                  | 132 ++++++++++---------
 tests/crash_tests/crash_batches.rs           |  21 +--
 tests/crash_tests/crash_sequential_writes.rs |   6 +-
 14 files changed, 165 insertions(+), 135 deletions(-)

diff --git a/examples/bench.rs b/examples/bench.rs
index c524ab909..0e1b8b1db 100644
--- a/examples/bench.rs
+++ b/examples/bench.rs
@@ -12,7 +12,7 @@ type Db = SledDb<1024>;
 
 const N_WRITES_PER_THREAD: u32 = 4 * 1024 * 1024;
 const MAX_CONCURRENCY: u32 = 4;
-const CONCURRENCY: &[usize] = &[/*1, 2, 4,*/ MAX_CONCURRENCY as _];
+const CONCURRENCY: &[usize] = &[/* 1, 2, 4, */ MAX_CONCURRENCY as _];
 const BYTES_PER_ITEM: u32 = 8;
 
 trait Databench: Clone + Send {
diff --git a/src/config.rs b/src/config.rs
index ed6e4534d..e4e4a5998 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -31,7 +31,8 @@ pub struct Config {
     /// Start a background thread that flushes data to disk
    /// every few milliseconds. Defaults to every 200ms.
     pub flush_every_ms: Option<usize>,
-    /// The zstd compression level to use when writing data to disk. Defaults to 3.
+    /// The zstd compression level to use when writing data to disk. Defaults
+    /// to 3.
     pub zstd_compression_level: i32,
     /// This is only set to `Some` for objects created via
     /// `Config::tmp`, and will remove the storage directory
diff --git a/src/db.rs b/src/db.rs
index 40f5edf89..2f42f1179 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -167,8 +167,8 @@ impl Drop for Db {
 impl Db {
     #[cfg(feature = "for-internal-testing-only")]
     fn validate(&self) -> io::Result<()> {
-        // for each tree, iterate over index, read node and assert low key matches
-        // and assert first time we've ever seen node ID
+        // for each tree, iterate over index, read node and assert low key
+        // matches and assert first time we've ever seen node ID
         let mut ever_seen = std::collections::HashSet::new();
         let before = std::time::Instant::now();
 
diff --git a/src/event_verifier.rs b/src/event_verifier.rs
index 92282bea7..52a173d79 100644
--- a/src/event_verifier.rs
+++ b/src/event_verifier.rs
@@ -85,7 +85,9 @@ impl EventVerifier {
         if matches!(state, State::PagedOut) {
             let dirty_epochs = self.dirty_epochs(object_id);
             if !dirty_epochs.is_empty() {
-                println!("{object_id:?} was paged out while having dirty epochs {dirty_epochs:?}");
+                println!(
+                    "{object_id:?} was paged out while having dirty epochs {dirty_epochs:?}"
+                );
                 self.print_debug_history_for_object(object_id);
                 println!("{state:?} {epoch:?} {at}");
                 println!("invalid object state transition");
diff --git a/src/heap.rs b/src/heap.rs
index 24cd18c99..c67ca536c 100644
--- a/src/heap.rs
+++ b/src/heap.rs
@@ -227,7 +227,8 @@ enum PersistentSettings {
 }
 
 impl PersistentSettings {
-    // NB: should only be called with a directory lock already exclusively acquired
+    // NB: should only be called with a directory lock already exclusively
+    // acquired
     fn verify_or_store<P: AsRef<Path>>(
         &self,
         path: P,
@@ -414,7 +415,8 @@ mod sys_io {
         match maybe!(file.read_exact_at(buf, offset)) {
             Ok(r) => Ok(r),
             Err(e) => {
-                // FIXME BUG 3: failed to read 64 bytes at offset 192 from file with len 192
+                // FIXME BUG 3: failed to read 64 bytes at offset 192 from file
+                // with len 192
                 println!(
                     "failed to read {} bytes at offset {} from file with len {}",
                     buf.len(),
@@ -724,11 +726,12 @@ impl Heap {
         let mut was_recovered = true;
         for p in [path, &slabs_dir] {
             if let Err(e) = fs::read_dir(p)
-                && e.kind() == io::ErrorKind::NotFound {
-                fallible!(fs::create_dir_all(p));
-                was_recovered = false;
-                continue;
-            }
+                && e.kind() == io::ErrorKind::NotFound
+            {
+                fallible!(fs::create_dir_all(p));
+                was_recovered = false;
+                continue;
+            }
 
             maybe!(fs::File::open(p).and_then(|f| f.sync_all()))?;
         }
diff --git a/src/id_allocator.rs b/src/id_allocator.rs
index 458fb5622..e23b56101 100644
--- a/src/id_allocator.rs
+++ b/src/id_allocator.rs
@@ -1,6 +1,6 @@
 use std::collections::BTreeSet;
-use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
+use std::sync::atomic::{AtomicU64, Ordering};
 
 use crossbeam_queue::SegQueue;
 use fnv::FnvHashSet;
@@ -17,7 +17,8 @@ pub struct Allocator {
     free_and_pending: Mutex<FreeSetAndTip>,
     /// Flat combining.
     ///
-    /// A lock free queue of recently freed ids which uses when there is contention on `free_and_pending`.
+    /// A lock free queue of recently freed ids which uses when there is
+    /// contention on `free_and_pending`.
     free_queue: SegQueue<u64>,
     allocation_counter: AtomicU64,
     free_counter: AtomicU64,
@@ -92,11 +93,7 @@ impl Allocator {
     pub fn max_allocated(&self) -> Option<u64> {
         let next = self.free_and_pending.lock().next_to_allocate;
 
-        if next == 0 {
-            None
-        } else {
-            Some(next - 1)
-        }
+        if next == 0 { None } else { Some(next - 1) }
     }
 
     pub fn allocate(&self) -> u64 {
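The `Flat combining.` doc comment above (whose wording fmt only rewraps — it means the queue is used when there is contention on `free_and_pending`) describes a contention-shedding scheme: a freeing thread that cannot take the mutex parks its id in the lock-free queue, and whichever thread next holds the lock drains the queue on everyone's behalf. A simplified sketch of that shape, assuming the `crossbeam-queue` crate the diff itself imports, with std's `Mutex` and a bare `BTreeSet` standing in for sled's internals:

use std::collections::BTreeSet;
use std::sync::Mutex;

use crossbeam_queue::SegQueue;

pub struct Allocator {
    free_and_pending: Mutex<BTreeSet<u64>>,
    free_queue: SegQueue<u64>,
}

impl Allocator {
    pub fn free(&self, id: u64) {
        if let Ok(mut set) = self.free_and_pending.try_lock() {
            // combine: drain ids parked by earlier contended frees
            while let Some(pending) = self.free_queue.pop() {
                set.insert(pending);
            }
            set.insert(id);
        } else {
            // contended: park the id and let the lock holder combine it
            self.free_queue.push(id);
        }
    }
}

fn main() {
    let alloc = Allocator {
        free_and_pending: Mutex::new(BTreeSet::new()),
        free_queue: SegQueue::new(),
    };
    alloc.free(42);
    assert!(alloc.free_and_pending.lock().unwrap().contains(&42));
}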
diff --git a/src/leaf.rs b/src/leaf.rs
index 3a2334cc2..c4c8382f0 100644
--- a/src/leaf.rs
+++ b/src/leaf.rs
@@ -168,7 +168,8 @@ impl Leaf {
         let mut leaf: Box<Leaf<LEAF_FANOUT>> =
             bincode::deserialize(&zstd_decoded).unwrap();
 
-        // use decompressed buffer length as a cheap proxy for in-memory size for now
+        // use decompressed buffer length as a cheap proxy for in-memory size
+        // for now
         leaf.in_memory_size = zstd_decoded.len();
 
         Ok(leaf)
diff --git a/src/lib.rs b/src/lib.rs
index 038c064b5..c87b38486 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -13,14 +13,15 @@
 // * this feels nice, we can lazily update a global stable flushed counter
 //   * can get rid of dirty_flush_epoch and page_out_on_flush?
 //   * or at least dirty_flush_epoch
-// * dirty_flush_epoch really means "hasn't yet been cooperatively serialized @ F.E."
+// * dirty_flush_epoch really means "hasn't yet been cooperatively serialized
+//   @ F.E."
 // * interesting metrics:
 //   * whether dirty for some epoch
 //   * whether cooperatively serialized for some epoch
 //   * whether fully flushed for some epoch
 //   * clean -> dirty -> {maybe coop} -> flushed
-// * for page-out, we only care if it's stable or if we need to add it to
-//   a page-out priority queue
+// * for page-out, we only care if it's stable or if we need to add it to a
+//   page-out priority queue
 // * page-out doesn't seem to happen as expected
 //
 // reliability
@@ -47,25 +48,25 @@
 // post-1.0 improvements
 //
 // reliability
-// TODO bug hiding: if the crash_iter test panics, the test doesn't fail as expected
-// TODO event log assertion for testing heap location bidirectional referential integrity,
-// particularly in the object location mapper.
-// TODO ensure nothing "from the future" gets copied into earlier epochs during GC
-// TODO collection_id on page_in checks - it needs to be pinned w/ heap's EBR?
-// TODO put aborts behind feature flags for hard crashes
+// TODO bug hiding: if the crash_iter test panics, the test doesn't fail as
+// expected TODO event log assertion for testing heap location bidirectional
+// referential integrity, particularly in the object location mapper.
+// TODO ensure nothing "from the future" gets copied into earlier epochs during
+// GC TODO collection_id on page_in checks - it needs to be pinned w/ heap's
+// EBR? TODO put aborts behind feature flags for hard crashes
 // TODO re-enable transaction tests in test_tree.rs
 //
 // performance
 // TODO force writers to flush when some number of dirty epochs have built up
 // TODO serialize flush batch in parallel
 // TODO concurrent serialization of NotYetSerialized dirty objects
 // TODO make the Arc
diff --git a/src/metadata_store.rs b/src/metadata_store.rs
index 85e5aa470..ee5d93fbc 100644
--- a/src/metadata_store.rs
+++ b/src/metadata_store.rs
@@ -257,8 +258,8 @@ impl MetadataStore {
         set_error(&self.inner.global_error, error);
     }
 
-    /// Returns the writer handle `MetadataStore`, a sorted array of metadata, and a sorted array
-    /// of free keys.
+    /// Returns the writer handle `MetadataStore`, a sorted array of metadata,
+    /// and a sorted array of free keys.
     pub fn recover<P: AsRef<Path>>(
         storage_directory: P,
     ) -> io::Result<(
@@ -285,9 +286,10 @@
 
         // initialize directories if not present
         if let Err(e) = fs::read_dir(path)
-            && e.kind() == io::ErrorKind::NotFound {
-            fallible!(fs::create_dir_all(path));
-        }
+            && e.kind() == io::ErrorKind::NotFound
+        {
+            fallible!(fs::create_dir_all(path));
+        }
 
         let _ = fs::File::create(path.join(WARN));
 
@@ -340,7 +342,8 @@ impl MetadataStore {
         Ok((MetadataStore { inner, is_shut_down: false }, recovery.recovered))
     }
 
-    /// Returns the recovered mappings, the id for the next log file, the highest allocated object id, and the set of free ids
+    /// Returns the recovered mappings, the id for the next log file, the
+    /// highest allocated object id, and the set of free ids
     fn recover_inner<P: AsRef<Path>>(
         storage_directory: P,
         directory_lock: &fs::File,
@@ -359,8 +362,8 @@ impl MetadataStore {
         )
     }
 
-    /// Write a batch of metadata. `None` for the second half of the outer tuple represents a
-    /// deletion. Returns the bytes written.
+    /// Write a batch of metadata. `None` for the second half of the outer tuple
+    /// represents a deletion. Returns the bytes written.
     pub fn write_batch(&self, batch: &[UpdateMetadata]) -> io::Result<u64> {
         self.check_error()?;
 
@@ -427,7 +430,8 @@
 }
 
 fn serialize_batch(batch: &[UpdateMetadata]) -> Vec<u8> {
-    // we initialize the vector to contain placeholder bytes for the frame length
+    // we initialize the vector to contain placeholder bytes for the frame
+    // length
     let batch_bytes = 0_u64.to_le_bytes().to_vec();
 
     // write format:
     // 8 byte LE length
     // payload:
     //     zstd encoded 8 byte LE key
     //     zstd encoded 8 byte LE value
     //     repeated for each kv pair
-    // LE encoded crc32 of length + payload raw bytes, XOR 0xAF to make non-zero in empty case
+    // LE encoded crc32 of length + payload raw bytes, XOR 0xAF to make
+    // non-zero in empty case
     let mut batch_encoder = ZstdEncoder::new(batch_bytes, ZSTD_LEVEL).unwrap();
 
     for update_metadata in batch {
@@ -609,8 +614,8 @@
     Ok(ret)
 }
 
-// returns the deduplicated data in this log, along with an optional offset where a
-// final torn write occurred.
+// returns the deduplicated data in this log, along with an optional offset
+// where a final torn write occurred.
 fn read_log(
     directory_path: &Path,
     lsn: u64,
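The `write format:` comment kept in the serialize_batch hunk above documents the frame layout: an 8-byte LE length written first as a placeholder, a zstd-compressed payload of 8-byte LE key/value pairs, then a little-endian crc32 over the length-plus-payload bytes, XORed with 0xAF so the empty case stays non-zero. A sketch of that layout with the zstd step left out; `serialize_frame` is hypothetical, and the `crc32fast` dependency is an assumption on my part:

// Assumes crc32fast = "1" in Cargo.toml.
fn serialize_frame(kv_pairs: &[(u64, u64)]) -> Vec<u8> {
    // 8 placeholder bytes for the LE frame length
    let mut frame = 0_u64.to_le_bytes().to_vec();

    // payload: 8-byte LE key then 8-byte LE value per pair
    // (the real code zstd-encodes this region)
    for (k, v) in kv_pairs {
        frame.extend_from_slice(&k.to_le_bytes());
        frame.extend_from_slice(&v.to_le_bytes());
    }

    // back-fill the length now that the payload size is known
    let len = (frame.len() - 8) as u64;
    frame[..8].copy_from_slice(&len.to_le_bytes());

    // LE crc32 of length + payload, XOR 0xAF to stay non-zero when empty
    let crc: u32 = crc32fast::hash(&frame) ^ 0xAF;
    frame.extend_from_slice(&crc.to_le_bytes());
    frame
}

fn main() {
    let frame = serialize_frame(&[(1, 2)]);
    // 8 length bytes + one 16-byte kv pair + 4 crc bytes
    assert_eq!(frame.len(), 28);
}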
diff --git a/src/object_cache.rs b/src/object_cache.rs
index 5f696a8b6..b75542928 100644
--- a/src/object_cache.rs
+++ b/src/object_cache.rs
@@ -166,8 +166,8 @@ impl std::panic::RefUnwindSafe
 }
 
 impl ObjectCache {
-    /// Returns the recovered ObjectCache, the tree indexes, and a bool signifying whether the system
-    /// was recovered or not
+    /// Returns the recovered ObjectCache, the tree indexes, and a bool
+    /// signifying whether the system was recovered or not
     pub fn recover(
         config: &Config,
     ) -> io::Result<(
@@ -530,8 +530,8 @@
                     assert_eq!(dirty_value_initial_read, dirty_value);
                 }
 
-                // drop is necessary to increase chance of Arc strong count reaching 1
-                // while taking ownership of the value
+                // drop is necessary to increase chance of Arc strong count reaching
+                // 1 while taking ownership of the value
                 drop(dirty_value_initial_read);
 
                 assert_eq!(dirty_epoch, flush_through_epoch);
@@ -658,8 +658,10 @@
                         leaf_ref.serialize(self.config.zstd_compression_level)
                     } else {
-                        // Here we expect that there was a benign data race and that another thread
-                        // mutated the leaf after encountering it being dirty for our epoch, after
+                        // Here we expect that there was a benign data race and
+                        // that another thread
+                        // mutated the leaf after encountering it being dirty
+                        // for our epoch, after
                         // storing a CooperativelySerialized in the dirty map.
                         let dirty_value_2_opt =
                             self.dirty.remove(&(dirty_epoch, dirty_object_id));
@@ -762,11 +764,13 @@
             };
 
             if let Some(ref inner) = object.inner.read().leaf
-                && let Some(dirty) = inner.dirty_flush_epoch {
-                assert!(dirty > flush_through_epoch);
-                // This object will be rewritten anyway when its dirty epoch gets flushed
-                continue;
-            }
+                && let Some(dirty) = inner.dirty_flush_epoch
+            {
+                assert!(dirty > flush_through_epoch);
+                // This object will be rewritten anyway when its dirty epoch
+                // gets flushed
+                continue;
+            }
 
             let data = match self.read(fragmented_object_id) {
                 Some(Ok(data)) => data,
diff --git a/src/object_location_mapper.rs b/src/object_location_mapper.rs
index 568ec29f6..e907962b2 100644
--- a/src/object_location_mapper.rs
+++ b/src/object_location_mapper.rs
@@ -154,7 +154,8 @@ impl ObjectLocationMapper {
     }
 
     pub(crate) fn allocate_object_id(&self) -> ObjectId {
-        // object IDs wrap a NonZeroU64, so if we get 0, just re-allocate and leak the id
+        // object IDs wrap a NonZeroU64, so if we get 0, just re-allocate and
+        // leak the id
         let mut object_id = self.object_id_allocator.allocate();
 
        if object_id == 0 {
@@ -231,7 +232,8 @@ impl ObjectLocationMapper {
             .get(slot)
             .swap(*object_id, Ordering::Release);
 
-        // TODO add debug event verifier here assert_eq!(0, last_oid_at_location);
+        // TODO add debug event verifier here assert_eq!(0,
+        // last_oid_at_location);
 
         last_address_opt
     }
@@ -287,9 +289,10 @@ impl ObjectLocationMapper {
             ) {
                 SlabAddress::from(rt_raw_sa)
             } else {
-                // object has been removed but its slot has not yet been freed,
-                // hopefully due to a deferred write
-                // TODO test that with a testing event log
+                // object has been removed but its slot has not yet been
+                // freed, hopefully due to a deferred
+                // write TODO test that with a testing
+                // event log
                 continue;
             };
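`allocate_object_id` above loops when the allocator hands back 0 because `ObjectId` wraps a `NonZeroU64` — which, among other things, keeps `Option<ObjectId>` the size of a bare u64. A toy version of that zero-skipping loop, with a plain atomic counter standing in for sled's reuse-aware id allocator:

use std::num::NonZeroU64;
use std::sync::atomic::{AtomicU64, Ordering};

fn allocate_object_id(counter: &AtomicU64) -> NonZeroU64 {
    loop {
        let raw = counter.fetch_add(1, Ordering::Relaxed);
        if let Some(object_id) = NonZeroU64::new(raw) {
            return object_id;
        }
        // raw was 0: re-allocate, leaking the one unrepresentable id
    }
}

fn main() {
    let counter = AtomicU64::new(0);
    assert_eq!(allocate_object_id(&counter).get(), 1);
    assert_eq!(allocate_object_id(&counter).get(), 2);
}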
diff --git a/src/tree.rs b/src/tree.rs
index 01b86ecec..322afafab 100644
--- a/src/tree.rs
+++ b/src/tree.rs
@@ -359,26 +359,27 @@ impl Tree {
                 }
 
                 if let Some(ref hi) = leaf.hi
-                    && &**hi <= key {
-                        let size = leaf.in_memory_size;
-                        log::trace!(
-                            "key overshoot in page_in - search key {:?}, node hi {:?}",
-                            key,
-                            hi
-                        );
-                        drop(write);
-                        self.cache.mark_access_and_evict(
-                            node.object_id,
-                            size,
-                            flush_epoch,
-                        )?;
-
-                        hint::spin_loop();
-
-                        last_continue = concat!(file!(), ':', line!());
-
-                        continue;
-                    }
+                    && &**hi <= key
+                {
+                    let size = leaf.in_memory_size;
+                    log::trace!(
+                        "key overshoot in page_in - search key {:?}, node hi {:?}",
+                        key,
+                        hi
+                    );
+                    drop(write);
+                    self.cache.mark_access_and_evict(
+                        node.object_id,
+                        size,
+                        flush_epoch,
+                    )?;
+
+                    hint::spin_loop();
+
+                    last_continue = concat!(file!(), ':', line!());
+
+                    continue;
+                }
                 return Ok((low_key, write, node));
             }
         }
@@ -660,13 +661,14 @@ impl Tree {
                 continue;
             }
             if let Some(ref hi) = leaf.hi
-                && &**hi <= key {
-                    log::trace!("key overshoot on leaf_for_key");
-                    // cache maintenance occurs in Drop for LeafReadGuard
-                    drop(leaf_guard);
-                    hint::spin_loop();
-                    continue;
-                }
+                && &**hi <= key
+            {
+                log::trace!("key overshoot on leaf_for_key");
+                // cache maintenance occurs in Drop for LeafReadGuard
+                drop(leaf_guard);
+                hint::spin_loop();
+                continue;
+            }
 
             if leaf.lo != node.low_key {
                 // TODO determine why this rare situation occurs and better
@@ -712,24 +714,25 @@
     }
 
     if let Some(max_unflushed_epoch) = leaf.max_unflushed_epoch {
-        // We already serialized something for this epoch, so if we did so again,
-        // we need to think a bit.
+        // We already serialized something for this epoch, so if we did so
+        // again, we need to think a bit.
         assert_ne!(max_unflushed_epoch, flush_epoch_guard.epoch());
     }
 
     if let Some(old_dirty_epoch) = leaf.dirty_flush_epoch
-        && old_dirty_epoch != flush_epoch_guard.epoch() {
-            assert!(old_dirty_epoch < flush_epoch_guard.epoch());
-
-            log::trace!(
-                "cooperatively flushing {:?} with dirty {:?} after checking into {:?}",
-                node.object_id,
-                old_dirty_epoch,
-                flush_epoch_guard.epoch()
-            );
-
-            self.cooperatively_serialize_leaf(node.object_id, &mut *leaf);
-        }
+        && old_dirty_epoch != flush_epoch_guard.epoch()
+    {
+        assert!(old_dirty_epoch < flush_epoch_guard.epoch());
+
+        log::trace!(
+            "cooperatively flushing {:?} with dirty {:?} after checking into {:?}",
+            node.object_id,
+            old_dirty_epoch,
+            flush_epoch_guard.epoch()
+        );
+
+        self.cooperatively_serialize_leaf(node.object_id, &mut *leaf);
+    }
 
     Ok(LeafWriteGuard {
         flush_epoch_guard,
@@ -982,17 +985,19 @@
     /// correct. If both old and new are `Some`, will modify the value if
     /// old is correct.
     ///
-    /// It returns `Ok(Ok(CompareAndSwapSuccess { new_value, previous_value }))` if operation finishes successfully.
+    /// It returns `Ok(Ok(CompareAndSwapSuccess { new_value, previous_value }))`
+    /// if operation finishes successfully.
     ///
     /// If it fails it returns:
-    /// - `Ok(Err(CompareAndSwapError{ current, proposed }))` if no IO
-    ///   error was encountered but the operation
-    ///   failed to specify the correct current value. `CompareAndSwapError` contains
-    ///   current and proposed values.
-    /// - `Err(io::Error)` if there was a high-level IO problem that prevented
-    ///   the operation from logically progressing. This is usually fatal and
-    ///   will prevent future requests from functioning, and requires the
-    ///   administrator to fix the system issue before restarting.
+    /// - `Ok(Err(CompareAndSwapError{ current, proposed }))` if no IO error
+    ///   was encountered but the operation failed to specify the correct
+    ///   current value. `CompareAndSwapError` contains current and proposed
+    ///   values.
+    /// - `Err(io::Error)` if there was a high-level IO problem that
+    ///   prevented the operation from logically progressing. This is
+    ///   usually fatal and will prevent future requests from functioning,
+    ///   and requires the administrator to fix the system issue before
+    ///   restarting.
     ///
     /// # Examples
    ///
@@ -1394,10 +1399,11 @@
         if let Some((_lo, w, _id)) = &last {
             let leaf = w.leaf.as_ref().unwrap();
             assert!(&leaf.lo <= key);
             if let Some(hi) = &leaf.hi
-                && hi <= key {
-                    let (lo, w, n) = last.take().unwrap();
-                    acquired_locks.insert(lo, (w, n));
-                }
+                && hi <= key
+            {
+                let (lo, w, n) = last.take().unwrap();
+                acquired_locks.insert(lo, (w, n));
+            }
         }
         if last.is_none() {
             // TODO evaluate whether this is correct, as page_in performs
@@ -1779,12 +1785,13 @@
         }
     }
 
-    /// Pops the last kv pair in the provided range, or returns `Ok(None)` if nothing
-    /// exists within that range.
+    /// Pops the last kv pair in the provided range, or returns `Ok(None)` if
+    /// nothing exists within that range.
     ///
     /// # Panics
    ///
-    /// This will panic if the provided range's end_bound() == Bound::Excluded(K::MIN).
+    /// This will panic if the provided range's end_bound() ==
+    /// Bound::Excluded(K::MIN).
     ///
     /// # Examples
     ///
@@ -1893,12 +1900,13 @@
         }
     }
 
-    /// Pops the first kv pair in the provided range, or returns `Ok(None)` if nothing
-    /// exists within that range.
+    /// Pops the first kv pair in the provided range, or returns `Ok(None)` if
+    /// nothing exists within that range.
     ///
     /// # Panics
     ///
-    /// This will panic if the provided range's end_bound() == Bound::Excluded(K::MIN).
+    /// This will panic if the provided range's end_bound() ==
+    /// Bound::Excluded(K::MIN).
     ///
     /// # Examples
     ///
@@ -2053,11 +2061,12 @@
 
             let leaf = node.leaf_read.leaf.as_ref().unwrap();
 
             if let Some(leaf_hi) = &leaf.hi
-                && leaf_hi <= &search_key {
-                    // concurrent merge, retry
-                    log::trace!("undershot in interator, retrying search");
-                    continue;
-                }
+                && leaf_hi <= &search_key
+            {
+                // concurrent merge, retry
+                log::trace!("undershot in interator, retrying search");
+                continue;
+            }
 
             if leaf.lo > search_key {
                 // concurrent successor split, retry
@@ -2118,7 +2127,8 @@ impl DoubleEndedIterator for Iter {
                 continue;
             }
 
-            // determine if we undershot our target due to concurrent modifications
+            // determine if we undershot our target due to concurrent
+            // modifications
             let undershot =
                 match (&leaf.hi, &self.next_back_last_lo, &self.bounds.1) {
                     (Some(leaf_hi), Some(last_lo), _) => leaf_hi < last_lo,
diff --git a/tests/crash_tests/crash_batches.rs b/tests/crash_tests/crash_batches.rs
index 69ba45a32..ec68cd1a8 100644
--- a/tests/crash_tests/crash_batches.rs
+++ b/tests/crash_tests/crash_batches.rs
@@ -18,7 +18,8 @@ fn verify_batches(tree: &Db) -> u32 {
         None => return 0,
     };
 
-    // we now expect all items in the batch to be present and to have the same value
+    // we now expect all items in the batch to be present and to have the same
+    // value
     for key in 0..BATCH_SIZE {
         let res = tree.get(u32_to_vec(key));
 
@@ -33,17 +34,19 @@
         };
         let value = slice_to_u32(&*v);
         // FIXME BUG 1 count 2
-        // assertion `left == right` failed: expected key 0 to have value 62003, instead it had value 62375 in db with keys:
-        // {0:62003, 1:62003, 2:62003, 3:62003, 4:62003, 5:62003, 6:62003, 7:62003,
+        // assertion `left == right` failed: expected key 0 to have value 62003,
+        // instead it had value 62375 in db with keys: {0:62003, 1:
+        // 62003, 2:62003, 3:62003, 4:62003, 5:62003, 6:62003, 7:62003,
         // Human: iterating shows correct value, but first get did not
         //
-        // expected key 1 to have value 1, instead it had value 29469 in db with keys:
-        // {0:1, 1:29469, 2:29469, 3:29469, 4:29469, 5:29469, 6:29469, 7:29469,
-        // Human: 0 didn't get included in later syncs
+        // expected key 1 to have value 1, instead it had value 29469 in db with
+        // keys: {0:1, 1:29469, 2:29469, 3:29469, 4:29469, 5:29469,
+        // 6:29469, 7:29469, Human: 0 didn't get included in later syncs
        //
-        // expected key 0 to have value 59485, instead it had value 59484 in db with keys:
-        // {0:59485, 1:59485, 2:59485, 3:59485, 4:59485, 5:59485, 6:59485, 7:59485,
-        // Human: had key N during first check, then N + 1 in iteration
+        // expected key 0 to have value 59485, instead it had value 59484 in db
+        // with keys: {0:59485, 1:59485, 2:59485, 3:59485, 4:59485,
+        // 5:59485, 6:59485, 7:59485, Human: had key N during first
+        // check, then N + 1 in iteration
         assert_eq!(
             first_value, value,
diff --git a/tests/crash_tests/crash_sequential_writes.rs b/tests/crash_tests/crash_sequential_writes.rs
index fdb3752f5..e9a204b8e 100644
--- a/tests/crash_tests/crash_sequential_writes.rs
+++ b/tests/crash_tests/crash_sequential_writes.rs
@@ -38,9 +38,9 @@ fn verify(tree: &Db) -> (u32, u32) {
         };
         let actual = slice_to_u32(&*v);
         // FIXME BUG 2
-        // thread '<unnamed>' panicked at tests/test_crash_recovery.rs:159:13:
-        // assertion `left == right` failed
-        //  left: 139
+        // thread '<unnamed>' panicked at
+        // tests/test_crash_recovery.rs:159:13: assertion `left
+        // == right` failed left: 139
         // right: 136
         assert_eq!(
             expected,