diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 465db08fe3afc..79ef7e9b6625d 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -63,7 +63,7 @@ use sc_client_api::{ utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, }; -use sc_state_db::StateDb; +use sc_state_db::{IsPruned, StateDb}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{ well_known_cache_keys, Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend, @@ -442,9 +442,10 @@ struct PendingBlock { } // wrapper that implements trait required for state_db -struct StateMetaDb<'a>(&'a dyn Database); +#[derive(Clone)] +struct StateMetaDb(Arc>); -impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { +impl sc_state_db::MetaDb for StateMetaDb { type Error = sp_database::error::DatabaseError; fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { @@ -915,7 +916,7 @@ impl sc_client_api::backend::BlockImportOperation struct StorageDb { pub db: Arc>, - pub state_db: StateDb>, + pub state_db: StateDb, StateMetaDb>, prefix_keys: bool, } @@ -1104,11 +1105,11 @@ impl Backend { let mut db_init_transaction = Transaction::new(); let requested_state_pruning = config.state_pruning.clone(); - let state_meta_db = StateMetaDb(db.as_ref()); + let state_meta_db = StateMetaDb(db.clone()); let map_e = sp_blockchain::Error::from_state_db; let (state_db_init_commit_set, state_db) = StateDb::open( - &state_meta_db, + state_meta_db, requested_state_pruning, !db.supports_ref_counting(), should_init, @@ -1317,10 +1318,11 @@ impl Backend { } trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); - let commit = - self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::>, - )?; + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, + )?; apply_state_commit(transaction, commit); } Ok(()) @@ -1471,14 +1473,16 @@ impl Backend { .storage .state_db .insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset) - .map_err(|e: sc_state_db::Error| { + .map_err(|e: sc_state_db::Error| { sp_blockchain::Error::from_state_db(e) })?; apply_state_commit(&mut transaction, commit); if number <= last_finalized_num { // Canonicalize in the db when re-importing existing blocks with state. 
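				// (Descriptive sketch of this branch, using only the names visible here: the
				// block was inserted through `state_db.insert_block(..)` just above; since its
				// number is at or below the last finalized number it can never be reverted, so
				// the canonicalization `CommitSet` is requested right away and merged into the
				// same `transaction`, keeping the journal and data writes atomic.)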
let commit = self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::>, + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, )?; apply_state_commit(&mut transaction, commit); meta_updates.push(MetaUpdate { @@ -1679,10 +1683,11 @@ impl Backend { .map(|c| f_num.saturated_into::() > c) .unwrap_or(true) { - let commit = - self.storage.state_db.canonicalize_block(&f_hash).map_err( - sp_blockchain::Error::from_state_db::>, - )?; + let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err( + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, + )?; apply_state_commit(transaction, commit); } @@ -2294,13 +2299,14 @@ impl sc_client_api::backend::Backend for Backend { match self.blockchain.header_metadata(hash) { Ok(ref hdr) => { - if !self.have_state_at(&hash, hdr.number) { - return Err(sp_blockchain::Error::UnknownBlock(format!( - "State already discarded for {:?}", - block - ))) - } - if let Ok(()) = self.storage.state_db.pin(&hash) { + let hint = || { + sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) + .unwrap_or(None) + .is_some() + }; + if let Ok(()) = + self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) + { let root = hdr.state_root; let db_state = DbStateBuilder::::new(self.storage.clone(), root) .with_optional_cache( @@ -2333,7 +2339,20 @@ impl sc_client_api::backend::Backend for Backend { _ => false, } } else { - !self.storage.state_db.is_pruned(hash, number.saturated_into::()) + match self.storage.state_db.is_pruned(hash, number.saturated_into::()) { + IsPruned::Pruned => false, + IsPruned::NotPruned => true, + IsPruned::MaybePruned => match self.blockchain.header_metadata(*hash) { + Ok(header) => sp_state_machine::Storage::get( + self.storage.as_ref(), + &header.state_root, + (&[], None), + ) + .unwrap_or(None) + .is_some(), + _ => false, + }, + } } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 1c7140777e16e..f21b707a489f0 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -51,7 +51,7 @@ use log::trace; use noncanonical::NonCanonicalOverlay; use parity_util_mem::{malloc_size, MallocSizeOf}; use parking_lot::RwLock; -use pruning::RefWindow; +use pruning::{HaveBlock, RefWindow}; use sc_client_api::{MemorySize, StateDbMemoryInfo}; use std::{ collections::{hash_map::Entry, HashMap}, @@ -62,6 +62,7 @@ const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; const PRUNING_MODE_ARCHIVE_CANON: &[u8] = b"archive_canonical"; const PRUNING_MODE_CONSTRAINED: &[u8] = b"constrained"; +pub(crate) const DEFAULT_MAX_BLOCK_CONSTRAINT: u32 = 256; /// Database value type. pub type DBValue = Vec; @@ -115,12 +116,14 @@ pub trait NodeDb { } /// Error type. +#[derive(Eq, PartialEq)] pub enum Error { /// Database backend error. Db(E), StateDb(StateDbError), } +#[derive(Eq, PartialEq)] pub enum StateDbError { /// `Codec` decoding error. 
 	Decoding(codec::Error),
@@ -138,6 +141,10 @@ pub enum StateDbError {
 	BlockAlreadyExists,
 	/// Invalid metadata
 	Metadata(String),
+	/// Trying to get a block record from the db while it is not committed to the db yet
+	BlockUnavailable,
+	/// Block record is missing from the pruning window
+	BlockMissing,
 }
 
 impl<E> From<codec::Error> for Error<E> {
@@ -182,6 +189,9 @@ impl fmt::Debug for StateDbError {
 			Self::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"),
 			Self::BlockAlreadyExists => write!(f, "Block already exists"),
 			Self::Metadata(message) => write!(f, "Invalid metadata: {}", message),
+			Self::BlockUnavailable =>
+				write!(f, "Trying to get a block record from the db while it is not committed to the db yet"),
+			Self::BlockMissing => write!(f, "Block record is missing from the pruning window"),
 		}
 	}
 }
@@ -266,7 +276,7 @@ impl Default for PruningMode {
 
 impl Default for Constraints {
 	fn default() -> Self {
-		Self { max_blocks: Some(256), max_mem: None }
+		Self { max_blocks: Some(DEFAULT_MAX_BLOCK_CONSTRAINT), max_mem: None }
 	}
 }
@@ -276,38 +286,41 @@ fn to_meta_key<S: Codec>(suffix: &[u8], data: &S) -> Vec<u8> {
 	buffer
 }
 
-struct StateDbSync<BlockHash: Hash, Key: Hash> {
+pub struct StateDbSync<BlockHash: Hash, Key: Hash, D: MetaDb> {
 	mode: PruningMode,
 	non_canonical: NonCanonicalOverlay<BlockHash, Key>,
-	pruning: Option<RefWindow<BlockHash, Key>>,
+	pruning: Option<RefWindow<BlockHash, Key, D>>,
 	pinned: HashMap<BlockHash, u32>,
 }
 
-impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
-	fn new<D: MetaDb>(
+impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf, D: MetaDb>
+	StateDbSync<BlockHash, Key, D>
+{
+	fn new(
 		mode: PruningMode,
 		ref_counting: bool,
-		db: &D,
-	) -> Result<StateDbSync<BlockHash, Key>, Error<D::Error>> {
+		db: D,
+	) -> Result<StateDbSync<BlockHash, Key, D>, Error<D::Error>> {
 		trace!(target: "state-db", "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting);
 
-		let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(db)?;
-		let pruning: Option<RefWindow<BlockHash, Key>> = match mode {
+		let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(&db)?;
+		let pruning: Option<RefWindow<BlockHash, Key, D>> = match mode {
 			PruningMode::Constrained(Constraints { max_mem: Some(_), .. }) => unimplemented!(),
-			PruningMode::Constrained(_) => Some(RefWindow::new(db, ref_counting)?),
+			PruningMode::Constrained(Constraints { max_blocks, .. }) =>
+				Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?),
 			PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None,
 		};
 		Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default() })
 	}
 
-	fn insert_block<E: fmt::Debug>(
+	fn insert_block(
 		&mut self,
 		hash: &BlockHash,
 		number: u64,
 		parent_hash: &BlockHash,
 		mut changeset: ChangeSet<Key>,
-	) -> Result<CommitSet<Key>, Error<E>> {
+	) -> Result<CommitSet<Key>, Error<D::Error>> {
 		match self.mode {
 			PruningMode::ArchiveAll => {
 				changeset.deleted.clear();
@@ -321,25 +334,23 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
-	fn canonicalize_block<E: fmt::Debug>(
-		&mut self,
-		hash: &BlockHash,
-	) -> Result<CommitSet<Key>, Error<E>> {
+	fn canonicalize_block(&mut self, hash: &BlockHash) -> Result<CommitSet<Key>, Error<D::Error>> {
+		// NOTE: it is important that the change to `LAST_CANONICAL` (emitted by
+		// `non_canonical.canonicalize`) and the insertion of the new pruning journal (emitted by
+		// `pruning.note_canonical`) are collected into the same `CommitSet` and committed to
+		// the database atomically, so they stay consistent when restarting the node
 		let mut commit = CommitSet::default();
 		if self.mode == PruningMode::ArchiveAll {
 			return Ok(commit)
 		}
-		match self.non_canonical.canonicalize(hash, &mut commit) {
-			Ok(()) =>
-				if self.mode == PruningMode::ArchiveCanonical {
-					commit.data.deleted.clear();
-				},
-			Err(e) => return Err(e.into()),
-		};
+		let number = self.non_canonical.canonicalize(hash, &mut commit)?;
+		if self.mode == PruningMode::ArchiveCanonical {
+			commit.data.deleted.clear();
+		}
 		if let Some(ref mut pruning) = self.pruning {
-			pruning.note_canonical(hash, &mut commit);
+			pruning.note_canonical(hash, number, &mut commit)?;
 		}
-		self.prune(&mut commit);
+		self.prune(&mut commit)?;
 		Ok(commit)
 	}
 
@@ -347,22 +358,31 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
-	fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool {
+	fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned {
 		match self.mode {
-			PruningMode::ArchiveAll => false,
+			PruningMode::ArchiveAll => IsPruned::NotPruned,
 			PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
 				if self.best_canonical().map(|c| number > c).unwrap_or(true) {
-					!self.non_canonical.have_block(hash)
+					if self.non_canonical.have_block(hash) {
+						IsPruned::NotPruned
+					} else {
+						IsPruned::Pruned
+					}
 				} else {
-					self.pruning.as_ref().map_or(false, |pruning| {
-						number < pruning.pending() || !pruning.have_block(hash)
-					})
+					match self.pruning.as_ref() {
+						None => IsPruned::NotPruned,
+						Some(pruning) => match pruning.have_block(hash, number) {
+							HaveBlock::NotHave => IsPruned::Pruned,
+							HaveBlock::Have => IsPruned::NotPruned,
+							HaveBlock::MayHave => IsPruned::MaybePruned,
+						},
+					}
 				}
 			},
 		}
 	}
 
-	fn prune(&mut self, commit: &mut CommitSet<Key>) {
+	fn prune(&mut self, commit: &mut CommitSet<Key>) -> Result<(), Error<D::Error>> {
 		if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) =
 			(&mut self.pruning, &self.mode)
 		{
@@ -376,12 +396,23 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
 			loop {
 				if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 {
 					break
 				}
 
 				let pinned = &self.pinned;
-				if pruning.next_hash().map_or(false, |h| pinned.contains_key(&h)) {
-					break
-				}
+				match pruning.next_hash() {
+					// the block record is temporarily unavailable, break and try again next time
+					Err(Error::StateDb(StateDbError::BlockUnavailable)) => break,
+					res =>
+						if res?.map_or(false, |h| pinned.contains_key(&h)) {
+							break
+						},
+				}
+				match pruning.prune_one(commit) {
+					// this branch should not be reached, as the previous `next_hash` didn't
+					// return an error; it is kept for robustness
+					Err(Error::StateDb(StateDbError::BlockUnavailable)) => break,
+					res => res?,
+				}
-				pruning.prune_one(commit);
 			}
 		}
+		Ok(())
 	}
 
 	/// Revert all non-canonical blocks with the best block number.
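 	/// (A hedged usage sketch, assuming `revert_one` keeps its `Option<CommitSet<Key>>`
 	/// return type, which this diff does not touch:)
 	/// ```ignore
 	/// if let Some(commit) = state_db.revert_one() {
 	/// 	db.commit(&commit);
 	/// }
 	/// ```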
@@ -403,13 +434,22 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
-	fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> {
+	fn pin<F>(&mut self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError>
+	where
+		F: Fn() -> bool,
+	{
 		match self.mode {
 			PruningMode::ArchiveAll => Ok(()),
 			PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
-				if self.non_canonical.have_block(hash) ||
-					self.pruning.as_ref().map_or(false, |pruning| pruning.have_block(hash))
-				{
+				let have_block = self.non_canonical.have_block(hash) ||
+					self.pruning.as_ref().map_or(false, |pruning| {
+						match pruning.have_block(hash, number) {
+							HaveBlock::NotHave => false,
+							HaveBlock::Have => true,
+							HaveBlock::MayHave => hint(),
+						}
+					});
+				if have_block {
 					let refs = self.pinned.entry(hash.clone()).or_default();
 					if *refs == 0 {
 						trace!(target: "state-db-pin", "Pinned block: {:?}", hash);
@@ -440,13 +480,13 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
-	pub fn get<D: NodeDb, Q: ?Sized>(
+	pub fn get<DB: NodeDb, Q: ?Sized>(
 		&self,
 		key: &Q,
-		db: &D,
-	) -> Result<Option<DBValue>, Error<D::Error>>
+		db: &DB,
+	) -> Result<Option<DBValue>, Error<DB::Error>>
 	where
-		Q: AsRef<D::Key>,
+		Q: AsRef<DB::Key>,
 		Key: std::borrow::Borrow<Q>,
 		Q: std::hash::Hash + Eq,
 	{
@@ -461,10 +501,11 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
 	fn memory_info(&self) -> StateDbMemoryInfo {
 		StateDbMemoryInfo {
 			non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)),
-			pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(p))),
+			pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(&p))),
 			pinned: MemorySize::from_bytes(malloc_size(&self.pinned)),
 		}
 	}
@@ -490,22 +531,21 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
-pub struct StateDb<BlockHash: Hash, Key: Hash> {
-	db: RwLock<StateDbSync<BlockHash, Key>>,
+pub struct StateDb<BlockHash: Hash, Key: Hash, D: MetaDb> {
+	db: RwLock<StateDbSync<BlockHash, Key, D>>,
 }
 
-impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDb<BlockHash, Key> {
+impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf, D: MetaDb>
+	StateDb<BlockHash, Key, D>
+{
 	/// Create an instance of [`StateDb`].
-	pub fn open<D>(
-		db: &D,
+	pub fn open(
+		db: D,
 		requested_mode: Option<PruningMode>,
 		ref_counting: bool,
 		should_init: bool,
-	) -> Result<(CommitSet<Key>, StateDb<BlockHash, Key>), Error<D::Error>>
-	where
-		D: MetaDb,
-	{
-		let stored_mode = fetch_stored_pruning_mode(db)?;
+	) -> Result<(CommitSet<Key>, StateDb<BlockHash, Key, D>), Error<D::Error>> {
+		let stored_mode = fetch_stored_pruning_mode(&db)?;
 
 		let selected_mode = match (should_init, stored_mode, requested_mode) {
 			(true, stored_mode, requested_mode) => {
@@ -548,27 +588,28 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDb<BlockHash, Key> {
-	pub fn insert_block<E: fmt::Debug>(
+	pub fn insert_block(
 		&self,
 		hash: &BlockHash,
 		number: u64,
 		parent_hash: &BlockHash,
 		changeset: ChangeSet<Key>,
-	) -> Result<CommitSet<Key>, Error<E>> {
+	) -> Result<CommitSet<Key>, Error<D::Error>> {
 		self.db.write().insert_block(hash, number, parent_hash, changeset)
 	}
 
 	/// Finalize a previously inserted block.
-	pub fn canonicalize_block<E: fmt::Debug>(
-		&self,
-		hash: &BlockHash,
-	) -> Result<CommitSet<Key>, Error<E>> {
+	pub fn canonicalize_block(&self, hash: &BlockHash) -> Result<CommitSet<Key>, Error<D::Error>> {
 		self.db.write().canonicalize_block(hash)
 	}
 
 	/// Prevents pruning of specified block and its descendants.
-	pub fn pin(&self, hash: &BlockHash) -> Result<(), PinError> {
-		self.db.write().pin(hash)
+	/// `hint` is used for further checking whether the given block exists
+	pub fn pin<F>(&self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError>
+	where
+		F: Fn() -> bool,
+	{
+		self.db.write().pin(hash, number, hint)
 	}
 
 	/// Allows pruning of specified block.
@@ -577,13 +618,13 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDb<BlockHash, Key> {
-	pub fn get<D: NodeDb, Q: ?Sized>(
+	pub fn get<DB: NodeDb, Q: ?Sized>(
 		&self,
 		key: &Q,
-		db: &D,
-	) -> Result<Option<DBValue>, Error<D::Error>>
+		db: &DB,
+	) -> Result<Option<DBValue>, Error<DB::Error>>
 	where
-		Q: AsRef<D::Key>,
+		Q: AsRef<DB::Key>,
 		Key: std::borrow::Borrow<Q>,
 		Q: std::hash::Hash + Eq,
 	{
@@ -609,7 +650,7 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDb<BlockHash, Key> {
-	pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool {
+	pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned {
 		return self.db.read().is_pruned(hash, number)
 	}
 
@@ -629,6 +670,17 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDb<BlockHash, Key> {
 	}
 }
 
+/// The result returned by `StateDb::is_pruned()`
+#[derive(Debug, PartialEq, Eq)]
+pub enum IsPruned {
+	/// Definitely pruned
+	Pruned,
+	/// Definitely not pruned
+	NotPruned,
+	/// May or may not be pruned, further checking is needed
+	MaybePruned,
+}
+
 fn fetch_stored_pruning_mode<D: MetaDb>(db: &D) -> Result<Option<PruningMode>, Error<D::Error>> {
 	let meta_key_mode = to_meta_key(PRUNING_MODE, &());
 	if let Some(stored_mode) = db.get_meta(&meta_key_mode).map_err(Error::Db)?
{ @@ -664,20 +716,19 @@ fn choose_pruning_mode( mod tests { use crate::{ test::{make_changeset, make_db, TestDb}, - Constraints, Error, PruningMode, StateDb, StateDbError, + Constraints, Error, IsPruned, PruningMode, StateDb, StateDbError, }; use sp_core::H256; - use std::io; - fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { + fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); let (state_db_init, state_db) = - StateDb::open(&mut db, Some(settings), false, true).unwrap(); + StateDb::open(db.clone(), Some(settings), false, true).unwrap(); db.commit(&state_db_init); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(1), 1, &H256::from_low_u64_be(0), @@ -687,7 +738,7 @@ mod tests { ); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(21), 2, &H256::from_low_u64_be(1), @@ -697,7 +748,7 @@ mod tests { ); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(22), 2, &H256::from_low_u64_be(1), @@ -707,7 +758,7 @@ mod tests { ); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(3), 3, &H256::from_low_u64_be(21), @@ -716,11 +767,11 @@ mod tests { .unwrap(), ); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(1)).unwrap()); + db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(1)).unwrap()); state_db.apply_pending(); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(4), 4, &H256::from_low_u64_be(3), @@ -729,9 +780,9 @@ mod tests { .unwrap(), ); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(21)).unwrap()); + db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(21)).unwrap()); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(3)).unwrap()); + db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(3)).unwrap()); state_db.apply_pending(); (db, state_db) @@ -741,7 +792,7 @@ mod tests { fn full_archive_keeps_everything() { let (db, sdb) = make_test_db(PruningMode::ArchiveAll); assert!(db.data_eq(&make_db(&[1, 21, 22, 3, 4, 91, 921, 922, 93, 94]))); - assert!(!sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::NotPruned); } #[test] @@ -750,6 +801,43 @@ mod tests { assert!(db.data_eq(&make_db(&[1, 21, 3, 91, 921, 922, 93, 94]))); } + #[test] + fn block_record_unavailable() { + let (mut db, state_db) = make_test_db(PruningMode::Constrained(Constraints { + max_blocks: Some(1), + max_mem: None, + })); + // import 2 blocks + for i in &[5, 6] { + db.commit( + &state_db + .insert_block( + &H256::from_low_u64_be(*i), + *i, + &H256::from_low_u64_be(*i - 1), + make_changeset(&[], &[]), + ) + .unwrap(), + ); + } + // canonicalize block 4 but not commit it to db + let c1 = state_db.canonicalize_block(&H256::from_low_u64_be(4)).unwrap(); + assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(3), 3), IsPruned::Pruned); + + // canonicalize block 5 but not commit it to db, block 4 is not pruned due to it is not + // commit to db yet (unavailable), return `MaybePruned` here because `apply_pending` is not + // called and block 3 is still in cache + let c2 = state_db.canonicalize_block(&H256::from_low_u64_be(5)).unwrap(); + assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(4), 4), IsPruned::MaybePruned); + + // commit block 4 and 5 to db, and import a new block will prune both block 4 and 5 + 
db.commit(&c1); + db.commit(&c2); + db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(6)).unwrap()); + assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(4), 4), IsPruned::Pruned); + assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(5), 5), IsPruned::Pruned); + } + #[test] fn prune_window_0() { let (db, _) = make_test_db(PruningMode::Constrained(Constraints { @@ -765,10 +853,10 @@ mod tests { max_blocks: Some(1), max_mem: None, })); - assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(21), 2)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(1), 1), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(21), 2), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(22), 2), IsPruned::Pruned); assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94]))); } @@ -778,10 +866,10 @@ mod tests { max_blocks: Some(2), max_mem: None, })); - assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); - assert!(!sdb.is_pruned(&H256::from_low_u64_be(21), 2)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(1), 1), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(21), 2), IsPruned::NotPruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(22), 2), IsPruned::Pruned); assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94]))); } @@ -789,11 +877,11 @@ mod tests { fn detects_incompatible_mode() { let mut db = make_db(&[]); let (state_db_init, state_db) = - StateDb::open(&mut db, Some(PruningMode::ArchiveAll), false, true).unwrap(); + StateDb::open(db.clone(), Some(PruningMode::ArchiveAll), false, true).unwrap(); db.commit(&state_db_init); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(0), 0, &H256::from_low_u64_be(0), @@ -802,8 +890,8 @@ mod tests { .unwrap(), ); let new_mode = PruningMode::Constrained(Constraints { max_blocks: Some(2), max_mem: None }); - let state_db_open_result: Result<(_, StateDb), _> = - StateDb::open(&mut db, Some(new_mode), false, false); + let state_db_open_result: Result<(_, StateDb), _> = + StateDb::open(db.clone(), Some(new_mode), false, false); assert!(state_db_open_result.is_err()); } @@ -814,12 +902,13 @@ mod tests { ) { let mut db = make_db(&[]); let (state_db_init, state_db) = - StateDb::::open(&mut db, mode_when_created, false, true).unwrap(); + StateDb::::open(db.clone(), mode_when_created, false, true) + .unwrap(); db.commit(&state_db_init); std::mem::drop(state_db); let state_db_reopen_result = - StateDb::::open(&mut db, mode_when_reopened, false, false); + StateDb::::open(db.clone(), mode_when_reopened, false, false); if let Ok(expected_mode) = expected_effective_mode_when_reopenned { let (state_db_init, state_db_reopened) = state_db_reopen_result.unwrap(); db.commit(&state_db_init); diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 13cf5825b1b24..559fc7ca023fe 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -28,7 +28,7 @@ use log::trace; use std::collections::{hash_map::Entry, HashMap, VecDeque}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; -const 
LAST_CANONICAL: &[u8] = b"last_canonical";
+pub(crate) const LAST_CANONICAL: &[u8] = b"last_canonical";
 const MAX_BLOCKS_PER_LEVEL: u64 = 32;
 
 /// See module documentation.
@@ -376,12 +376,13 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 	}
 
 	/// Select a top-level root and canonicalize it. Discards all sibling subtrees and the root.
-	/// Returns a set of changes that need to be added to the DB.
+	/// Add the set of changes of the canonicalized block to `CommitSet`.
+	/// Return the block number of the canonicalized block.
 	pub fn canonicalize(
 		&mut self,
 		hash: &BlockHash,
 		commit: &mut CommitSet<Key>,
-	) -> Result<(), StateDbError> {
+	) -> Result<u64, StateDbError> {
 		trace!(target: "state-db", "Canonicalizing {:?}", hash);
 		let level = self
 			.levels
@@ -431,7 +432,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 			.push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode()));
 		trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len());
 		self.pending_canonicalizations.push(hash.clone());
-		Ok(())
+		Ok(canonicalized.1)
 	}
 
 	fn apply_canonicalizations(&mut self) {
@@ -755,7 +756,7 @@ mod tests {
 				.unwrap(),
 		);
 		db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap());
-		assert_eq!(db.meta.len(), 3);
+		assert_eq!(db.meta_len(), 3);
 		let overlay2 = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
 		assert_eq!(overlay.levels, overlay2.levels);
diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs
index 0fdcb8e822b6f..2c23110910495 100644
--- a/client/state-db/src/pruning.rs
+++ b/client/state-db/src/pruning.rs
@@ -24,74 +24,74 @@
 //! the death list.
 //! The changes are journaled in the DB.
 
-use crate::{to_meta_key, CommitSet, Error, Hash, MetaDb};
+use crate::{
+	noncanonical::LAST_CANONICAL, to_meta_key, CommitSet, Error, Hash, MetaDb, StateDbError,
+	DEFAULT_MAX_BLOCK_CONSTRAINT,
+};
 use codec::{Decode, Encode};
-use log::{trace, warn};
-use std::collections::{HashMap, HashSet, VecDeque};
+use log::{error, trace, warn};
+use std::{
+	cmp,
+	collections::{HashMap, HashSet, VecDeque},
+};
 
-const LAST_PRUNED: &[u8] = b"last_pruned";
+pub(crate) const LAST_PRUNED: &[u8] = b"last_pruned";
 const PRUNING_JOURNAL: &[u8] = b"pruning_journal";
 
 /// See module documentation.
 #[derive(parity_util_mem_derive::MallocSizeOf)]
-pub struct RefWindow<BlockHash: Hash, Key: Hash> {
-	/// A queue of keys that should be deleted for each block in the pruning window.
-	death_rows: VecDeque<DeathRow<BlockHash, Key>>,
-	/// An index that maps each key from `death_rows` to block number.
-	death_index: HashMap<Key, u64>,
+pub struct RefWindow<BlockHash: Hash, Key: Hash, D: MetaDb> {
+	/// A queue of blocks that keeps track of the keys that should be deleted for each
+	/// block in the pruning window.
+	queue: DeathRowQueue<BlockHash, Key, D>,
 	/// Block number that corresponds to the front of `death_rows`.
-	pending_number: u64,
+	base: u64,
 	/// Number of calls of `note_canonical` after
 	/// the last call of `apply_pending` or `revert_pending`
 	pending_canonicalizations: usize,
 	/// Number of calls of `prune_one` after
 	/// the last call of `apply_pending` or `revert_pending`
 	pending_prunings: usize,
-	/// Keep track of re-inserted keys and do not delete them when pruning.
-	/// Setting this to false requires a backend that supports reference
-	/// counting.
-	count_insertions: bool,
 }
 
-#[derive(Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)]
-struct DeathRow<BlockHash: Hash, Key: Hash> {
-	hash: BlockHash,
-	journal_key: Vec<u8>,
-	deleted: HashSet<Key>,
-}
-
-#[derive(Encode, Decode)]
-struct JournalRecord<BlockHash: Hash, Key: Hash> {
-	hash: BlockHash,
-	inserted: Vec<Key>,
-	deleted: Vec<Key>,
-}
-
-fn to_journal_key(block: u64) -> Vec<u8> {
-	to_meta_key(PRUNING_JOURNAL, &block)
+/// `DeathRowQueue` is used to keep track of blocks in the pruning window; there are two
+/// flavors:
+/// - `Mem`, used when the backend database does not support reference counting: it keeps
+///   all blocks in memory and tracks re-inserted keys so they are not deleted when pruning
+/// - `DbBacked`, used when the backend database supports reference counting: it keeps only
+///   a limited number of blocks in memory and loads more blocks on demand
+#[derive(parity_util_mem_derive::MallocSizeOf)]
+enum DeathRowQueue<BlockHash: Hash, Key: Hash, D: MetaDb> {
+	Mem {
+		/// A queue of keys that should be deleted for each block in the pruning window.
+		death_rows: VecDeque<DeathRow<BlockHash, Key>>,
+		/// An index that maps each key from `death_rows` to block number.
+		death_index: HashMap<Key, u64>,
+	},
+	DbBacked {
+		// The backend database
+		#[ignore_malloc_size_of = "Shared data"]
+		db: D,
+		/// A queue of keys that should be deleted for each block in the pruning window.
+		/// Only the first few blocks of the pruning window are cached here; the cached
+		/// blocks are consecutive and ordered by block number
+		cache: VecDeque<DeathRow<BlockHash, Key>>,
+		/// A soft limit of the cache's size
+		cache_capacity: usize,
+		/// The number of blocks in the queue that are not loaded into `cache`.
+		uncached_blocks: usize,
+	},
 }
 
-impl<BlockHash: Hash, Key: Hash> RefWindow<BlockHash, Key> {
-	pub fn new<D: MetaDb>(
-		db: &D,
-		count_insertions: bool,
-	) -> Result<RefWindow<BlockHash, Key>, Error<D::Error>> {
-		let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(Error::Db)?;
-		let pending_number: u64 = match last_pruned {
-			Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1,
-			None => 0,
-		};
-		let mut block = pending_number;
-		let mut pruning = RefWindow {
-			death_rows: Default::default(),
-			death_index: Default::default(),
-			pending_number,
-			pending_canonicalizations: 0,
-			pending_prunings: 0,
-			count_insertions,
+impl<BlockHash: Hash, Key: Hash, D: MetaDb> DeathRowQueue<BlockHash, Key, D> {
+	/// Return a `DeathRowQueue` that keeps all blocks in memory
+	fn new_mem(db: &D, base: u64) -> Result<DeathRowQueue<BlockHash, Key, D>, Error<D::Error>> {
+		let mut block = base;
+		let mut queue = DeathRowQueue::<BlockHash, Key, D>::Mem {
+			death_rows: VecDeque::new(),
+			death_index: HashMap::new(),
 		};
 		// read the journal
-		trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number);
+		trace!(target: "state-db", "Reading pruning journal for the memory queue. Pending #{}", base);
 		loop {
 			let journal_key = to_journal_key(block);
 			match db.get_meta(&journal_key).map_err(Error::Db)? {
@@ -99,102 +99,444 @@ impl<BlockHash: Hash, Key: Hash> RefWindow<BlockHash, Key> {
 				Some(record) => {
 					let record: JournalRecord<BlockHash, Key> =
 						Decode::decode(&mut record.as_slice())?;
 					trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len());
-					pruning.import(
-						&record.hash,
-						journal_key,
-						record.inserted.into_iter(),
-						record.deleted,
-					);
+					queue.import(base, record);
 				},
 				None => break,
 			}
 			block += 1;
 		}
-		Ok(pruning)
+		Ok(queue)
+	}
+
+	/// Return a `DeathRowQueue` that is backed by a database, keeping only a limited
+	/// number of blocks in memory
+	fn new_db_backed(
+		db: D,
+		base: u64,
+		mut uncached_blocks: usize,
+		window_size: u32,
+	) -> Result<DeathRowQueue<BlockHash, Key, D>, Error<D::Error>> {
+		// limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT`
+		let cache_capacity = window_size.max(1).min(DEFAULT_MAX_BLOCK_CONSTRAINT) as usize;
+		let mut cache = VecDeque::with_capacity(cache_capacity);
+		trace!(target: "state-db", "Reading pruning journal for the database-backed queue. Pending #{}", base);
+		// Load blocks from the db
+		DeathRowQueue::load_batch_from_db(
+			&db,
+			&mut uncached_blocks,
+			&mut cache,
+			base,
+			cache_capacity,
+		)?;
+		Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, uncached_blocks })
+	}
+
+	/// Import a new block to the back of the queue
+	fn import(&mut self, base: u64, journal_record: JournalRecord<BlockHash, Key>) {
+		let JournalRecord { hash, inserted, deleted } = journal_record;
+		match self {
+			DeathRowQueue::DbBacked { uncached_blocks, cache, cache_capacity, .. } => {
+				// `uncached_blocks` being zero means that all blocks are currently loaded
+				// into `cache`, thus if `cache` is not full, put the new block in `cache` too
+				if *uncached_blocks == 0 && cache.len() < *cache_capacity {
+					cache.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() });
+				} else {
+					*uncached_blocks += 1;
+				}
+			},
+			DeathRowQueue::Mem { death_rows, death_index } => {
+				// remove all re-inserted keys from death rows
+				for k in inserted {
+					if let Some(block) = death_index.remove(&k) {
+						death_rows[(block - base) as usize].deleted.remove(&k);
+					}
+				}
+				// add new keys
+				let imported_block = base + death_rows.len() as u64;
+				for k in deleted.iter() {
+					death_index.insert(k.clone(), imported_block);
+				}
+				death_rows.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() });
+			},
+		}
 	}
 
-	fn import<I: IntoIterator<Item = Key>>(
+	/// Pop out one block from the front of the queue, `base` is the block number
+	/// of the first block of the queue
+	fn pop_front(
 		&mut self,
-		hash: &BlockHash,
-		journal_key: Vec<u8>,
-		inserted: I,
-		deleted: Vec<Key>,
-	) {
-		if self.count_insertions {
-			// remove all re-inserted keys from death rows
-			for k in inserted {
-				if let Some(block) = self.death_index.remove(&k) {
-					self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k);
+		base: u64,
+	) -> Result<Option<DeathRow<BlockHash, Key>>, Error<D::Error>> {
+		match self {
+			DeathRowQueue::DbBacked { db, uncached_blocks, cache, cache_capacity } => {
+				if cache.is_empty() && *uncached_blocks != 0 {
+					// load more blocks from db since there are still blocks in it
+					DeathRowQueue::load_batch_from_db(
+						db,
+						uncached_blocks,
+						cache,
+						base,
+						*cache_capacity,
+					)?;
 				}
-			}
+				Ok(cache.pop_front())
+			},
+			DeathRowQueue::Mem { death_rows, death_index } => match death_rows.pop_front() {
+				Some(row) => {
+					for k in row.deleted.iter() {
+						death_index.remove(k);
+					}
+					Ok(Some(row))
+				},
+				None => Ok(None),
+			},
+		}
+	}
 
-			// add new keys
-			let imported_block = self.pending_number + self.death_rows.len() as u64;
-			for k in deleted.iter() {
-				self.death_index.insert(k.clone(), imported_block);
+	/// Revert recent additions to the queue, namely remove `amount` number of blocks from
+	/// the back of the queue; `base` is the block number of the first block of the queue
+	fn revert_recent_add(&mut self, base: u64, amount: usize) {
+		debug_assert!(amount <= self.len());
+		match self {
+			DeathRowQueue::DbBacked { uncached_blocks, cache, .. } => {
+				// remove from `uncached_blocks` if it can cover the whole amount
+				if *uncached_blocks >= amount {
+					*uncached_blocks -= amount;
+					return
+				}
+				// reset `uncached_blocks` and remove the remaining blocks from `cache`
+				let remain = amount - *uncached_blocks;
+				*uncached_blocks = 0;
+				cache.truncate(cache.len() - remain);
+			},
+			DeathRowQueue::Mem { death_rows, death_index } => {
+				// Revert recent additions to the queue.
+				// Note that pending insertions might cause some existing deletions to be removed
+				// from `death_index`. We don't bother to track and revert that for now. This
+				// means that a few nodes might end up not being deleted in case the transaction
+				// fails and `revert_pending` is called.
+				death_rows.truncate(death_rows.len() - amount);
+				let new_max_block = death_rows.len() as u64 + base;
+				death_index.retain(|_, block| *block < new_max_block);
+			},
+		}
+	}
+
+	/// Load a batch of blocks from the backend database into `cache`, starting from (and
+	/// including) the block that follows the last block of `cache`; `base` is the block
+	/// number of the first block of the queue
+	fn load_batch_from_db(
+		db: &D,
+		uncached_blocks: &mut usize,
+		cache: &mut VecDeque<DeathRow<BlockHash, Key>>,
+		base: u64,
+		cache_capacity: usize,
+	) -> Result<(), Error<D::Error>> {
+		// return if all blocks are already loaded into `cache` and there are no other
+		// blocks in the backend database
+		if *uncached_blocks == 0 {
+			return Ok(())
+		}
+		let start = base + cache.len() as u64;
+		let batch_size = cmp::min(*uncached_blocks, cache_capacity);
+		let mut loaded = 0;
+		for i in 0..batch_size as u64 {
+			match load_death_row_from_db::<BlockHash, Key, D>(db, start + i)? {
+				Some(row) => {
+					cache.push_back(row);
+					loaded += 1;
+				},
+				// a block may be added to the queue but not committed to the db yet; if
+				// data were missing in the db, `load_death_row_from_db` would return a db error
+				None => break,
			}
		}
+		*uncached_blocks -= loaded;
+		Ok(())
+	}
+
+	/// Get the block at the given index of the queue, `base` is the block number of the
+	/// first block of the queue
+	fn get(
+		&mut self,
+		base: u64,
+		index: usize,
+	) -> Result<Option<DeathRow<BlockHash, Key>>, Error<D::Error>> {
+		match self {
+			DeathRowQueue::DbBacked { db, uncached_blocks, cache, cache_capacity } => {
+				// check if `index` targets a block that resides on disk
+				if index >= cache.len() && index < cache.len() + *uncached_blocks {
+					// if `index` targets the next batch of `DeathRow`, load a batch from db
+					if index - cache.len() < cmp::min(*uncached_blocks, *cache_capacity) {
+						DeathRowQueue::load_batch_from_db(
+							db,
+							uncached_blocks,
+							cache,
+							base,
+							*cache_capacity,
+						)?;
+					} else {
+						// load a single `DeathRow` from db, but do not insert it into `cache`
+						// because `cache` is a queue of consecutive `DeathRow`s
+						// NOTE: this branch should not be entered because blocks are visited
+						// in successive increasing order; it is kept for robustness
+						return load_death_row_from_db(db, base + index as u64)
+					}
+				}
+				Ok(cache.get(index).cloned())
+			},
+			DeathRowQueue::Mem { death_rows, .. } => Ok(death_rows.get(index).cloned()),
+		}
+	}
+
+	/// Check if the block at the given `index` of the queue exists;
+	/// it is the caller's responsibility to ensure `index` is not out of bounds
+	fn have_block(&self, hash: &BlockHash, index: usize) -> HaveBlock {
+		match self {
+			DeathRowQueue::DbBacked { cache, .. } => {
+				if cache.len() > index {
+					(cache[index].hash == *hash).into()
+				} else {
+					// the block does not exist in `cache`, but it may exist in the
+					// uncached blocks
+					HaveBlock::MayHave
+				}
+			},
+			DeathRowQueue::Mem { death_rows, .. } => (death_rows[index].hash == *hash).into(),
+		}
+	}
+
+	/// Return the number of blocks in the pruning window
+	fn len(&self) -> usize {
+		match self {
+			DeathRowQueue::DbBacked { uncached_blocks, cache, .. } =>
+				cache.len() + *uncached_blocks,
+			DeathRowQueue::Mem { death_rows, .. } => death_rows.len(),
+		}
+	}
+
+	#[cfg(test)]
+	fn get_mem_queue_state(
+		&self,
+	) -> Option<(&VecDeque<DeathRow<BlockHash, Key>>, &HashMap<Key, u64>)> {
+		match self {
+			DeathRowQueue::DbBacked { .. } => None,
+			DeathRowQueue::Mem { death_rows, death_index } => Some((death_rows, death_index)),
+		}
+	}
+
+	#[cfg(test)]
+	fn get_db_backed_queue_state(&self) -> Option<(&VecDeque<DeathRow<BlockHash, Key>>, usize)> {
+		match self {
+			DeathRowQueue::DbBacked { cache, uncached_blocks, .. } =>
+				Some((cache, *uncached_blocks)),
+			DeathRowQueue::Mem { .. } => None,
+		}
+	}
+}
+
+fn load_death_row_from_db<BlockHash: Hash, Key: Hash, D: MetaDb>(
+	db: &D,
+	block: u64,
+) -> Result<Option<DeathRow<BlockHash, Key>>, Error<D::Error>> {
+	let journal_key = to_journal_key(block);
+	match db.get_meta(&journal_key).map_err(Error::Db)? {
+		Some(record) => {
+			let JournalRecord { hash, deleted, .. } = Decode::decode(&mut record.as_slice())?;
+			Ok(Some(DeathRow { hash, deleted: deleted.into_iter().collect() }))
+		},
+		None => Ok(None),
+	}
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)]
+struct DeathRow<BlockHash: Hash, Key: Hash> {
+	hash: BlockHash,
+	deleted: HashSet<Key>,
+}
+
+#[derive(Encode, Decode, Default)]
+struct JournalRecord<BlockHash: Hash, Key: Hash> {
+	hash: BlockHash,
+	inserted: Vec<Key>,
+	deleted: Vec<Key>,
+}
+
+fn to_journal_key(block: u64) -> Vec<u8> {
+	to_meta_key(PRUNING_JOURNAL, &block)
+}
+
+/// The result returned by `RefWindow::have_block`
+#[derive(Debug, PartialEq, Eq)]
+pub enum HaveBlock {
+	/// Definitely does not have this block
+	NotHave,
+	/// May or may not have this block, further checking is needed
+	MayHave,
+	/// Definitely has this block
+	Have,
+}
+
+impl From<bool> for HaveBlock {
+	fn from(have: bool) -> Self {
+		if have {
+			HaveBlock::Have
+		} else {
+			HaveBlock::NotHave
+		}
+	}
+}
+
+impl<BlockHash: Hash, Key: Hash, D: MetaDb> RefWindow<BlockHash, Key, D> {
+	pub fn new(
+		db: D,
+		window_size: u32,
+		count_insertions: bool,
+	) -> Result<RefWindow<BlockHash, Key, D>, Error<D::Error>> {
+		// the block number of the first block in the queue, or the next block number if
+		// the queue is empty
+		let base = match db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(Error::Db)? {
+			Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1,
+			None => 0,
+		};
+		// the block number of the last block in the queue
+		let last_canonicalized_number =
+			match db.get_meta(&to_meta_key(LAST_CANONICAL, &())).map_err(Error::Db)? {
+				Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice())?.1),
+				None => None,
+			};
+
+		let queue = if count_insertions {
+			DeathRowQueue::new_mem(&db, base)?
+		} else {
+			let unload = match last_canonicalized_number {
+				Some(last_canonicalized_number) => {
+					debug_assert!(last_canonicalized_number + 1 >= base);
+					last_canonicalized_number + 1 - base
+				},
+				// `None` means `LAST_CANONICAL` has never been written; since the pruning
+				// journals are committed in the same `CommitSet` as `LAST_CANONICAL`, no
+				// pruning journal has ever been committed to the db either, thus set
+				// `unload` to zero
+				None => 0,
+			};
+			DeathRowQueue::new_db_backed(db, base, unload as usize, window_size)?
+		};
+
+		Ok(RefWindow { queue, base, pending_canonicalizations: 0, pending_prunings: 0 })
 	}
 
 	pub fn window_size(&self) -> u64 {
-		(self.death_rows.len() - self.pending_prunings) as u64
+		(self.queue.len() - self.pending_prunings) as u64
 	}
 
-	pub fn next_hash(&self) -> Option<BlockHash> {
-		self.death_rows.get(self.pending_prunings).map(|r| r.hash.clone())
+	/// Get the hash of the next block to be pruned
+	pub fn next_hash(&mut self) -> Result<Option<BlockHash>, Error<D::Error>> {
+		let res = match &self.queue {
+			DeathRowQueue::DbBacked { cache, .. } =>
+				if self.pending_prunings < cache.len() {
+					cache.get(self.pending_prunings).map(|r| r.hash.clone())
+				} else {
+					self.get(self.pending_prunings)?.map(|r| r.hash)
+				},
+			DeathRowQueue::Mem { death_rows, .. } =>
+				death_rows.get(self.pending_prunings).map(|r| r.hash.clone()),
		};
+		Ok(res)
 	}
 
 	pub fn mem_used(&self) -> usize {
 		0
 	}
 
+	// Return the block number of the first block that is not pending-pruned
 	pub fn pending(&self) -> u64 {
-		self.pending_number + self.pending_prunings as u64
+		self.base + self.pending_prunings as u64
 	}
 
-	pub fn have_block(&self, hash: &BlockHash) -> bool {
-		self.death_rows.iter().skip(self.pending_prunings).any(|r| r.hash == *hash)
+	fn is_empty(&self) -> bool {
+		self.queue.len() <= self.pending_prunings
+	}
+
+	// Check if a block is in the pruning window and has not been pruned yet
+	pub fn have_block(&self, hash: &BlockHash, number: u64) -> HaveBlock {
+		// if the queue is empty or the block number exceeds the pruning window, we
+		// definitely do not have this block
+		if self.is_empty() ||
+			number < self.pending() ||
+			number >= self.base + self.queue.len() as u64
+		{
+			return HaveBlock::NotHave
+		}
+		self.queue.have_block(hash, (number - self.base) as usize)
+	}
+
+	fn get(&mut self, index: usize) -> Result<Option<DeathRow<BlockHash, Key>>, Error<D::Error>> {
+		if index >= self.queue.len() {
+			return Ok(None)
+		}
+		match self.queue.get(self.base, index)? {
+			None => {
+				if matches!(self.queue, DeathRowQueue::DbBacked { .. }) &&
+					// whether we are trying to get a pending-canonicalized block that may
+					// not be committed to the db yet
+					index >= self.queue.len() - self.pending_canonicalizations
+				{
+					trace!(target: "state-db", "Trying to get a pending-canonicalized block that is not committed to the db yet");
+					Err(Error::StateDb(StateDbError::BlockUnavailable))
+				} else {
+					// A block in the queue is missing; this may happen if `CommitSet`s are
+					// committed to the db concurrently and `apply_pending`/`revert_pending`
+					// are called out of order. This should not happen under the current
+					// implementation, but the check is kept as a defensive measure
+					error!(target: "state-db", "Block record is missing from the pruning window, block number {}", self.base + index as u64);
+					Err(Error::StateDb(StateDbError::BlockMissing))
+				}
+			},
+			s => Ok(s),
+		}
 	}
 
 	/// Prune next block. Expects at least one block in the window. Adds changes to `commit`.
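 	/// A minimal usage sketch, mirroring the tests at the end of this file (error
 	/// handling elided):
 	/// ```ignore
 	/// let mut commit = CommitSet::default();
 	/// pruning.prune_one(&mut commit)?; // stages data deletions and journal cleanup
 	/// db.commit(&commit); // persist the commit set
 	/// pruning.apply_pending(); // advance `base` past the pruned block
 	/// ```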
-	pub fn prune_one(&mut self, commit: &mut CommitSet<Key>) {
-		if let Some(pruned) = self.death_rows.get(self.pending_prunings) {
+	pub fn prune_one(&mut self, commit: &mut CommitSet<Key>) -> Result<(), Error<D::Error>> {
+		if let Some(pruned) = self.get(self.pending_prunings)? {
 			trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len());
-			let index = self.pending_number + self.pending_prunings as u64;
-			commit.data.deleted.extend(pruned.deleted.iter().cloned());
+			let index = self.base + self.pending_prunings as u64;
+			commit.data.deleted.extend(pruned.deleted.into_iter());
 			commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode()));
-			commit.meta.deleted.push(pruned.journal_key.clone());
+			commit
+				.meta
+				.deleted
+				.push(to_journal_key(self.base + self.pending_prunings as u64));
 			self.pending_prunings += 1;
 		} else {
 			warn!(target: "state-db", "Trying to prune when there's nothing to prune");
 		}
+		Ok(())
 	}
 
 	/// Add a change set to the window. Creates a journal record and pushes it to `commit`
-	pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet<Key>) {
+	pub fn note_canonical(
+		&mut self,
+		hash: &BlockHash,
+		number: u64,
+		commit: &mut CommitSet<Key>,
+	) -> Result<(), Error<D::Error>> {
+		if self.base == 0 && self.queue.len() == 0 && number > 0 {
+			// assume that the parent was canonicalized
+			self.base = number;
+		} else if (self.base + self.queue.len() as u64) != number {
+			return Err(Error::StateDb(StateDbError::InvalidBlockNumber))
+		}
 		trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len());
-		let inserted = if self.count_insertions {
+		let inserted = if matches!(self.queue, DeathRowQueue::Mem { .. }) {
 			commit.data.inserted.iter().map(|(k, _)| k.clone()).collect()
 		} else {
 			Default::default()
 		};
 		let deleted = ::std::mem::take(&mut commit.data.deleted);
 		let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted };
-		let block = self.pending_number + self.death_rows.len() as u64;
-		let journal_key = to_journal_key(block);
-		commit.meta.inserted.push((journal_key.clone(), journal_record.encode()));
-		self.import(
-			&journal_record.hash,
-			journal_key,
-			journal_record.inserted.into_iter(),
-			journal_record.deleted,
-		);
+		commit.meta.inserted.push((to_journal_key(number), journal_record.encode()));
+		self.queue.import(self.base, journal_record);
 		self.pending_canonicalizations += 1;
+		Ok(())
 	}
 
 	/// Apply all pending changes
@@ -202,32 +544,22 @@ impl<BlockHash: Hash, Key: Hash> RefWindow<BlockHash, Key> {
 		self.pending_canonicalizations = 0;
 		for _ in 0..self.pending_prunings {
 			let pruned = self
-				.death_rows
-				.pop_front()
-				.expect("pending_prunings is always < death_rows.len()");
+				.queue
+				.pop_front(self.base)
+				// NOTE: `pop_front` should not return a `MetaDb::Error` here because blocks
+				// are visited by `RefWindow::prune_one` before `RefWindow::apply_pending`,
+				// so `DeathRowQueue::get` has already loaded them into the cache
+				.expect("block must be loaded in cache, thus no MetaDb::Error")
+				.expect("pending_prunings is always < queue.len()");
 			trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len());
-			if self.count_insertions {
-				for k in pruned.deleted.iter() {
-					self.death_index.remove(k);
-				}
-			}
-			self.pending_number += 1;
+			self.base += 1;
 		}
 		self.pending_prunings = 0;
 	}
 
 	/// Revert all pending changes
 	pub fn revert_pending(&mut self) {
-		// Revert pending deletions.
- // Note that pending insertions might cause some existing deletions to be removed from - // `death_index` We don't bother to track and revert that for now. This means that a few - // nodes might end up no being deleted in case transaction fails and `revert_pending` is - // called. - self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); - if self.count_insertions { - let new_max_block = self.death_rows.len() as u64 + self.pending_number; - self.death_index.retain(|_, block| *block < new_max_block); - } + self.queue.revert_recent_add(self.base, self.pending_canonicalizations); self.pending_canonicalizations = 0; self.pending_prunings = 0; } @@ -235,38 +567,45 @@ impl RefWindow { #[cfg(test)] mod tests { - use super::RefWindow; + use super::{to_journal_key, DeathRowQueue, HaveBlock, JournalRecord, RefWindow, LAST_PRUNED}; use crate::{ + noncanonical::LAST_CANONICAL, test::{make_commit, make_db, TestDb}, - CommitSet, + to_meta_key, CommitSet, Error, Hash, StateDbError, DEFAULT_MAX_BLOCK_CONSTRAINT, }; + use codec::Encode; use sp_core::H256; - fn check_journal(pruning: &RefWindow, db: &TestDb) { - let restored: RefWindow = RefWindow::new(db, pruning.count_insertions).unwrap(); - assert_eq!(pruning.pending_number, restored.pending_number); - assert_eq!(pruning.death_rows, restored.death_rows); - assert_eq!(pruning.death_index, restored.death_index); + fn check_journal(pruning: &RefWindow, db: &TestDb) { + let count_insertions = matches!(pruning.queue, DeathRowQueue::Mem { .. }); + let restored: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, count_insertions).unwrap(); + assert_eq!(pruning.base, restored.base); + assert_eq!(pruning.queue.get_mem_queue_state(), restored.queue.get_mem_queue_state()); } #[test] fn created_from_empty_db() { let db = make_db(&[]); - let pruning: RefWindow = RefWindow::new(&db, true).unwrap(); - assert_eq!(pruning.pending_number, 0); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); + let pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); + assert_eq!(pruning.base, 0); + let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); + assert!(death_rows.is_empty()); + assert!(death_index.is_empty()); } #[test] fn prune_empty() { let db = make_db(&[]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - assert_eq!(pruning.pending_number, 0); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); + pruning.prune_one(&mut commit).unwrap(); + assert_eq!(pruning.base, 0); + let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); + assert!(death_rows.is_empty()); + assert!(death_index.is_empty()); assert!(pruning.pending_prunings == 0); assert!(pruning.pending_canonicalizations == 0); } @@ -274,41 +613,45 @@ mod tests { #[test] fn prune_one() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[4, 5], &[1, 3]); - let h = H256::random(); - pruning.note_canonical(&h, &mut commit); + let hash = H256::random(); + pruning.note_canonical(&hash, 0, &mut commit).unwrap(); db.commit(&commit); - assert!(pruning.have_block(&h)); 
+ assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Have); pruning.apply_pending(); - assert!(pruning.have_block(&h)); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Have); assert!(commit.data.deleted.is_empty()); - assert_eq!(pruning.death_rows.len(), 1); - assert_eq!(pruning.death_index.len(), 2); + let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); + assert_eq!(death_rows.len(), 1); + assert_eq!(death_index.len(), 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - assert!(!pruning.have_block(&h)); + pruning.prune_one(&mut commit).unwrap(); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::NotHave); db.commit(&commit); pruning.apply_pending(); - assert!(!pruning.have_block(&h)); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::NotHave); assert!(db.data_eq(&make_db(&[2, 4, 5]))); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); - assert_eq!(pruning.pending_number, 1); + let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); + assert!(death_rows.is_empty()); + assert!(death_index.is_empty()); + assert_eq!(pruning.base, 1); } #[test] fn prune_two() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); @@ -316,53 +659,55 @@ mod tests { check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.pending_number, 2); + assert_eq!(pruning.base, 2); } #[test] fn prune_two_pending() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); - 
assert_eq!(pruning.pending_number, 2); + assert_eq!(pruning.base, 2); } #[test] fn reinserted_survives() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); pruning.apply_pending(); @@ -370,62 +715,64 @@ mod tests { check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); pruning.apply_pending(); - assert_eq!(pruning.pending_number, 3); + assert_eq!(pruning.base, 3); } #[test] fn reinserted_survive_pending() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); pruning.apply_pending(); - assert_eq!(pruning.pending_number, 3); + assert_eq!(pruning.base, 3); } #[test] fn reinserted_ignores() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, false).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut 
commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); pruning.apply_pending(); @@ -433,9 +780,276 @@ mod tests { check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); - assert!(pruning.death_index.is_empty()); + } + + fn push_last_canonicalized(block: u64, commit: &mut CommitSet) { + commit + .meta + .inserted + .push((to_meta_key(LAST_CANONICAL, &()), (block, block).encode())); + } + + fn push_last_pruned(block: u64, commit: &mut CommitSet) { + commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), block.encode())); + } + + #[test] + fn init_db_backed_queue() { + let mut db = make_db(&[]); + let mut commit = CommitSet::default(); + + fn load_pruning_from_db(db: TestDb) -> (usize, u64) { + let pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); + (cache.len(), pruning.base) + } + + fn push_record(block: u64, commit: &mut CommitSet) { + commit + .meta + .inserted + .push((to_journal_key(block), JournalRecord::::default().encode())); + } + + // empty database + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 0); + assert_eq!(base, 0); + + // canonicalized the genesis block but no pruning + push_last_canonicalized(0, &mut commit); + push_record(0, &mut commit); + db.commit(&commit); + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 1); + assert_eq!(base, 0); + + // pruned the genesis block + push_last_pruned(0, &mut commit); + db.commit(&commit); + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 0); + assert_eq!(base, 1); + + // canonicalize more blocks + push_last_canonicalized(10, &mut commit); + for i in 1..=10 { + push_record(i, &mut commit); + } + db.commit(&commit); + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 10); + assert_eq!(base, 1); + + // pruned all blocks + push_last_pruned(10, &mut commit); + db.commit(&commit); + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 0); + assert_eq!(base, 11); + } + + #[test] + fn db_backed_queue() { + let mut db = make_db(&[]); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; + + // start as an empty queue + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), 0); + assert_eq!(uncached_blocks, 0); + + // import blocks + // queue size and content should match + for i in 0..(cache_capacity + 10) { + let mut commit = make_commit(&[], &[]); + pruning.note_canonical(&(i as u64), i as u64, &mut commit).unwrap(); + push_last_canonicalized(i as u64, &mut commit); + db.commit(&commit); + // block will fill in cache first + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + if i < cache_capacity { + assert_eq!(cache.len(), i + 1); + assert_eq!(uncached_blocks, 0); + } else { + assert_eq!(cache.len(), cache_capacity); + 
+				assert_eq!(uncached_blocks, i - cache_capacity + 1);
+			}
+		}
+		pruning.apply_pending();
+		assert_eq!(pruning.queue.len(), cache_capacity + 10);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), cache_capacity);
+		assert_eq!(uncached_blocks, 10);
+		for i in 0..cache_capacity {
+			assert_eq!(cache[i].hash, i as u64);
+		}
+
+		// importing a new block to the end of the queue
+		// won't keep the new block in memory
+		let mut commit = CommitSet::default();
+		pruning
+			.note_canonical(&(cache_capacity as u64 + 10), cache_capacity as u64 + 10, &mut commit)
+			.unwrap();
+		assert_eq!(pruning.queue.len(), cache_capacity + 11);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), cache_capacity);
+		assert_eq!(uncached_blocks, 11);
+
+		// revert the last add, which has not been applied yet
+		// NOTE: do not commit the previous `CommitSet` to db
+		pruning.revert_pending();
+		assert_eq!(pruning.queue.len(), cache_capacity + 10);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), cache_capacity);
+		assert_eq!(uncached_blocks, 10);
+
+		// remove one block from the start of the queue;
+		// the block is removed from the head of the cache
+		let mut commit = CommitSet::default();
+		pruning.prune_one(&mut commit).unwrap();
+		db.commit(&commit);
+		pruning.apply_pending();
+		assert_eq!(pruning.queue.len(), cache_capacity + 9);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), cache_capacity - 1);
+		assert_eq!(uncached_blocks, 10);
+		for i in 0..(cache_capacity - 1) {
+			assert_eq!(cache[i].hash, (i + 1) as u64);
+		}
+
+		// load a new queue from db;
+		// `cache` is full again but the content of the queue should be the same
+		let pruning: RefWindow<u64, H256, TestDb> =
+			RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap();
+		assert_eq!(pruning.queue.len(), cache_capacity + 9);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), cache_capacity);
+		assert_eq!(uncached_blocks, 9);
+		for i in 0..cache_capacity {
+			assert_eq!(cache[i].hash, (i + 1) as u64);
+		}
+	}
+
+	#[test]
+	fn load_block_from_db() {
+		let mut db = make_db(&[]);
+		let mut pruning: RefWindow<u64, H256, TestDb> =
+			RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap();
+		let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize;
+
+		// import blocks
+		for i in 0..(cache_capacity as u64 * 2 + 10) {
+			let mut commit = make_commit(&[], &[]);
+			pruning.note_canonical(&i, i, &mut commit).unwrap();
+			push_last_canonicalized(i, &mut commit);
+			db.commit(&commit);
+		}
+
+		// the following operations won't trigger loading blocks from db:
+		// - getting a block that is in the cache
+		// - getting a block that is not in the queue
+		let index = cache_capacity;
+		assert_eq!(
+			pruning.queue.get(0, index - 1).unwrap().unwrap().hash,
+			cache_capacity as u64 - 1
+		);
+		assert_eq!(pruning.queue.get(0, cache_capacity * 2 + 10).unwrap(), None);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), cache_capacity);
+		assert_eq!(uncached_blocks, cache_capacity + 10);
+
+		// getting a block that is not in the cache will trigger loading blocks from db
+		assert_eq!(pruning.queue.get(0, index).unwrap().unwrap().hash, cache_capacity as u64);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), cache_capacity * 2);
+		assert_eq!(uncached_blocks, 10);
+
+		// prune all blocks currently loaded into the cache
+		for _ in 0..cache_capacity * 2 {
+			let mut commit = CommitSet::default();
+			pruning.prune_one(&mut commit).unwrap();
+			db.commit(&commit);
+		}
+		pruning.apply_pending();
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert!(cache.is_empty());
+		assert_eq!(uncached_blocks, 10);
+
+		// getting the hash of a block that is not in the cache will also trigger
+		// loading the remaining blocks from db
+		assert_eq!(
+			pruning.queue.get(pruning.base, 0).unwrap().unwrap().hash,
+			(cache_capacity * 2) as u64
+		);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), 10);
+		assert_eq!(uncached_blocks, 0);
+
+		// load a new queue from db;
+		// `cache` should be the same
+		let pruning: RefWindow<u64, H256, TestDb> =
+			RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap();
+		assert_eq!(pruning.queue.len(), 10);
+		let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap();
+		assert_eq!(cache.len(), 10);
+		assert_eq!(uncached_blocks, 0);
+		for i in 0..10 {
+			assert_eq!(cache[i].hash, (cache_capacity * 2 + i) as u64);
+		}
+	}
+
+	#[test]
+	fn get_block_from_queue() {
+		let mut db = make_db(&[]);
+		let mut pruning: RefWindow<u64, H256, TestDb> =
+			RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap();
+		let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as u64;
+
+		// import blocks and commit them to db
+		let mut commit = make_commit(&[], &[]);
+		for i in 0..(cache_capacity + 10) {
+			pruning.note_canonical(&i, i, &mut commit).unwrap();
+		}
+		db.commit(&commit);
+
+		// import one more block but do not commit it to db yet
+		let mut pending_commit = make_commit(&[], &[]);
+		let index = cache_capacity + 10;
+		pruning.note_canonical(&index, index, &mut pending_commit).unwrap();
+
+		let mut commit = make_commit(&[], &[]);
+		// prune the blocks that were committed to db
+		for i in 0..(cache_capacity + 10) {
+			assert_eq!(pruning.next_hash().unwrap(), Some(i));
+			pruning.prune_one(&mut commit).unwrap();
+		}
+		// `BlockUnavailable` is returned for the block that was not committed to db
+		assert_eq!(
+			pruning.next_hash().unwrap_err(),
+			Error::StateDb(StateDbError::BlockUnavailable)
+		);
+		assert_eq!(
+			pruning.prune_one(&mut commit).unwrap_err(),
+			Error::StateDb(StateDbError::BlockUnavailable)
+		);
+		// once the block is committed to db, no error is returned
+		db.commit(&pending_commit);
+		assert_eq!(pruning.next_hash().unwrap(), Some(index));
+		pruning.prune_one(&mut commit).unwrap();
+		db.commit(&commit);
+
+		// import a block and do not commit it to db before calling `apply_pending`
+		pruning
+			.note_canonical(&(index + 1), index + 1, &mut make_commit(&[], &[]))
+			.unwrap();
+		pruning.apply_pending();
+		assert_eq!(pruning.next_hash().unwrap_err(), Error::StateDb(StateDbError::BlockMissing));
+	}
 }
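The new tests above all pivot on one invariant of the db-backed pruning window: at most DEFAULT_MAX_BLOCK_CONSTRAINT (256) death-row records stay in memory, while the rest are only counted and live in the database until lazily loaded. The following self-contained sketch models that bounded cache; ToyQueue and its fields are invented for illustration and are not the real RefWindow/DeathRowQueue types.

use std::collections::VecDeque;

/// Toy model of a db-backed pruning queue: at most `cap` records are held
/// in memory; everything past that is only counted, as if it lived solely
/// in the backing database.
struct ToyQueue {
	cap: usize,
	cache: VecDeque<u64>, // in-memory block hashes, oldest at the front
	uncached: usize,      // records that exist only in the db
}

impl ToyQueue {
	fn new(cap: usize) -> Self {
		Self { cap, cache: VecDeque::new(), uncached: 0 }
	}

	/// Mirrors `note_canonical`: new blocks fill the cache first,
	/// later ones are left "in the db".
	fn push(&mut self, hash: u64) {
		if self.cache.len() < self.cap {
			self.cache.push_back(hash);
		} else {
			self.uncached += 1;
		}
	}

	/// Mirrors `prune_one`: pruning pops from the front of the cache;
	/// the real queue would then lazily refill from the db.
	fn pop(&mut self) -> Option<u64> {
		self.cache.pop_front()
	}

	fn len(&self) -> usize {
		self.cache.len() + self.uncached
	}
}

fn main() {
	let cap = 256;
	let mut q = ToyQueue::new(cap);
	for i in 0..(cap as u64 + 10) {
		q.push(i);
	}
	// The same shape `db_backed_queue` asserts: a full cache plus
	// 10 records that never made it into memory.
	assert_eq!(q.cache.len(), cap);
	assert_eq!(q.uncached, 10);
	assert_eq!(q.len(), cap + 10);
	assert_eq!(q.pop(), Some(0));
}

Popping from the front without an eager refill is why `db_backed_queue` sees `cache.len()` drop to `cache_capacity - 1` after one `prune_one` while `uncached_blocks` stays at 10 until the next load.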
diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs
index 9fb97036b2f24..314ec2902452a 100644
--- a/client/state-db/src/test.rs
+++ b/client/state-db/src/test.rs
@@ -20,10 +20,16 @@
 use crate::{ChangeSet, CommitSet, DBValue, MetaDb, NodeDb};
 use sp_core::H256;
-use std::collections::HashMap;
+use std::{
+	collections::HashMap,
+	sync::{Arc, RwLock},
+};
+
+#[derive(Default, Debug, Clone)]
+pub struct TestDb(Arc<RwLock<TestDbInner>>);

 #[derive(Default, Debug, Clone, PartialEq, Eq)]
-pub struct TestDb {
+struct TestDbInner {
 	pub data: HashMap<H256, DBValue>,
 	pub meta: HashMap<Vec<u8>, DBValue>,
 }

@@ -32,7 +38,7 @@ impl MetaDb for TestDb {
 	type Error = ();

 	fn get_meta(&self, key: &[u8]) -> Result<Option<DBValue>, ()> {
-		Ok(self.meta.get(key).cloned())
+		Ok(self.0.read().unwrap().meta.get(key).cloned())
 	}
 }

@@ -41,25 +47,29 @@ impl NodeDb for TestDb {
 	type Key = H256;

 	fn get(&self, key: &H256) -> Result<Option<DBValue>, ()> {
-		Ok(self.data.get(key).cloned())
+		Ok(self.0.read().unwrap().data.get(key).cloned())
 	}
 }

 impl TestDb {
 	pub fn commit(&mut self, commit: &CommitSet<H256>) {
-		self.data.extend(commit.data.inserted.iter().cloned());
-		self.meta.extend(commit.meta.inserted.iter().cloned());
+		self.0.write().unwrap().data.extend(commit.data.inserted.iter().cloned());
+		self.0.write().unwrap().meta.extend(commit.meta.inserted.iter().cloned());
 		for k in commit.data.deleted.iter() {
-			self.data.remove(k);
+			self.0.write().unwrap().data.remove(k);
 		}
-		self.meta.extend(commit.meta.inserted.iter().cloned());
 		for k in commit.meta.deleted.iter() {
-			self.meta.remove(k);
+			self.0.write().unwrap().meta.remove(k);
 		}
 	}

 	pub fn data_eq(&self, other: &TestDb) -> bool {
-		self.data == other.data
+		self.0.read().unwrap().data == other.0.read().unwrap().data
+	}
+
+	pub fn meta_len(&self) -> usize {
+		self.0.read().unwrap().meta.len()
 	}
 }

@@ -78,11 +88,11 @@ pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet<H256> {
 }

 pub fn make_db(inserted: &[u64]) -> TestDb {
-	TestDb {
+	TestDb(Arc::new(RwLock::new(TestDbInner {
 		data: inserted
 			.iter()
 			.map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()))
 			.collect(),
 		meta: Default::default(),
-	}
+	})))
 }
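The test.rs rework exists because `StateDb::open` and `RefWindow::new` now take the backing database by value, so a test needs a handle it can clone into the state-db while still committing through its own copy. Below is a minimal sketch of that shared-handle pattern; the `SharedDb` type and its methods are invented stand-ins for `TestDb`, not code from this patch.

use std::{
	collections::HashMap,
	sync::{Arc, RwLock},
};

/// Cloning copies only the `Arc`, so writes through one handle are
/// visible through every other handle.
#[derive(Default, Clone)]
struct SharedDb(Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>);

impl SharedDb {
	fn insert(&self, key: &[u8], value: &[u8]) {
		self.0.write().unwrap().insert(key.to_vec(), value.to_vec());
	}

	fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
		self.0.read().unwrap().get(key).cloned()
	}
}

fn main() {
	let db = SharedDb::default();
	// Hand one handle off by value, as the tests hand a `TestDb` clone
	// to `RefWindow::new(db.clone(), ..)`.
	let handle_for_pruning = db.clone();
	// A commit through the original handle is observed through the clone.
	db.insert(b"meta", b"value");
	assert_eq!(handle_for_pruning.get(b"meta"), Some(b"value".to_vec()));
}

This shared interior state is what lets `get_block_from_queue` construct the window first and only later commit `pending_commit`, with the already-constructed `RefWindow` seeing the block appear in its database.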