Skip to content

Commit

Permalink
KIP 7 implementation
Browse files Browse the repository at this point in the history
  • Loading branch information
someone235 committed Sep 30, 2024
1 parent 3bc2844 commit 2c7dad3
Show file tree
Hide file tree
Showing 12 changed files with 161 additions and 43 deletions.
6 changes: 3 additions & 3 deletions components/consensusmanager/src/session.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ use kaspa_consensus_core::{
block::Block,
blockstatus::BlockStatus,
daa_score_timestamp::DaaScoreTimestamp,
errors::consensus::ConsensusResult,
errors::{consensus::ConsensusResult, pruning::PruningImportResult},
header::Header,
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList},
trusted::{ExternalGhostdagData, TrustedBlock},
Expand Down Expand Up @@ -437,8 +437,8 @@ impl ConsensusSessionOwned {
self.clone().spawn_blocking(move |c| c.validate_pruning_points()).await
}

pub async fn async_are_pruning_points_violating_finality(&self, pp_list: PruningPointsList) -> bool {
self.clone().spawn_blocking(move |c| c.are_pruning_points_violating_finality(pp_list)).await
pub async fn async_next_good_finality_point(&self, pp_list: PruningPointsList) -> PruningImportResult<Hash> {
self.clone().spawn_blocking(move |c| c.next_good_finality_point(pp_list)).await
}

pub async fn async_creation_timestamp(&self) -> u64 {
Expand Down
13 changes: 11 additions & 2 deletions consensus/core/src/api/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,12 @@ pub trait ConsensusApi: Send + Sync {
unimplemented!()
}

fn apply_pruning_proof(&self, proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> {
fn apply_pruning_proof(
&self,
proof: PruningPointProof,
trusted_set: &[TrustedBlock],
good_finality_point: Hash,
) -> PruningImportResult<()> {
unimplemented!()
}

Expand Down Expand Up @@ -351,7 +356,7 @@ pub trait ConsensusApi: Send + Sync {
unimplemented!()
}

fn are_pruning_points_violating_finality(&self, pp_list: PruningPointsList) -> bool {
fn next_good_finality_point(&self, pp_list: PruningPointsList) -> PruningImportResult<Hash> {
unimplemented!()
}

Expand All @@ -362,6 +367,10 @@ pub trait ConsensusApi: Send + Sync {
fn finality_point(&self) -> Hash {
unimplemented!()
}

fn get_good_finality_point(&self) -> Hash {
unimplemented!()
}
}

pub type DynConsensus = Arc<dyn ConsensusApi>;
3 changes: 3 additions & 0 deletions consensus/core/src/errors/pruning.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,9 @@ pub enum PruningImportError {

#[error("process exit was initiated while validating pruning point proof")]
PruningValidationInterrupted,

#[error("pruning point list violates finality")]
PruningPointListViolatesFinality,
}

pub type PruningImportResult<T> = std::result::Result<T, PruningImportError>;
23 changes: 19 additions & 4 deletions consensus/src/consensus/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ use crate::{
acceptance_data::AcceptanceDataStoreReader,
block_transactions::BlockTransactionsStoreReader,
ghostdag::{GhostdagData, GhostdagStoreReader},
good_finality_point::GoodFinalityPointStoreReader,
headers::{CompactHeaderData, HeaderStoreReader},
headers_selected_tip::HeadersSelectedTipStoreReader,
past_pruning_points::PastPruningPointsStoreReader,
Expand Down Expand Up @@ -271,6 +272,7 @@ impl Consensus {
pruning_lock.clone(),
notification_root.clone(),
counters.clone(),
creation_timestamp,
));

let pruning_processor = Arc::new(PruningProcessor::new(
Expand Down Expand Up @@ -760,8 +762,13 @@ impl ConsensusApi for Consensus {
self.services.pruning_proof_manager.validate_pruning_point_proof(proof)
}

fn apply_pruning_proof(&self, proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> {
self.services.pruning_proof_manager.apply_proof(proof, trusted_set)
fn apply_pruning_proof(
&self,
proof: PruningPointProof,
trusted_set: &[TrustedBlock],
good_finality_point: Hash,
) -> PruningImportResult<()> {
self.services.pruning_proof_manager.apply_proof(proof, trusted_set, good_finality_point)
}

fn import_pruning_points(&self, pruning_points: PruningPointsList) {
Expand Down Expand Up @@ -1031,8 +1038,8 @@ impl ConsensusApi for Consensus {
}
}

fn are_pruning_points_violating_finality(&self, pp_list: PruningPointsList) -> bool {
self.virtual_processor.are_pruning_points_violating_finality(pp_list)
fn next_good_finality_point(&self, pp_list: PruningPointsList) -> PruningImportResult<Hash> {
self.virtual_processor.next_good_finality_point(pp_list)
}

fn creation_timestamp(&self) -> u64 {
Expand All @@ -1042,4 +1049,12 @@ impl ConsensusApi for Consensus {
fn finality_point(&self) -> Hash {
self.virtual_processor.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, self.pruning_point())
}

fn get_good_finality_point(&self) -> Hash {
if self.virtual_processor.is_consensus_mature() {
self.pruning_point()
} else {
self.storage.good_finality_point_store.read().get().unwrap()
}
}
}
5 changes: 5 additions & 0 deletions consensus/src/consensus/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ use crate::{
daa::DbDaaStore,
depth::DbDepthStore,
ghostdag::{CompactGhostdagData, DbGhostdagStore},
good_finality_point::DbGoodFinalityPointStore,
headers::{CompactHeaderData, DbHeadersStore},
headers_selected_tip::DbHeadersSelectedTipStore,
past_pruning_points::DbPastPruningPointsStore,
Expand Down Expand Up @@ -48,6 +49,7 @@ pub struct ConsensusStorage {
pub pruning_utxoset_stores: Arc<RwLock<PruningUtxosetStores>>,
pub virtual_stores: Arc<RwLock<VirtualStores>>,
pub selected_chain_store: Arc<RwLock<DbSelectedChainStore>>,
pub good_finality_point_store: Arc<RwLock<DbGoodFinalityPointStore>>,

// Append-only stores
pub ghostdag_stores: Arc<Vec<Arc<DbGhostdagStore>>>,
Expand Down Expand Up @@ -235,6 +237,8 @@ impl ConsensusStorage {
let virtual_stores =
Arc::new(RwLock::new(VirtualStores::new(db.clone(), lkg_virtual_state.clone(), utxo_set_builder.build())));

let good_finality_point_store = Arc::new(RwLock::new(DbGoodFinalityPointStore::new(db.clone())));

// Ensure that reachability stores are initialized
reachability::init(reachability_store.write().deref_mut()).unwrap();
relations::init(reachability_relations_store.write().deref_mut());
Expand Down Expand Up @@ -264,6 +268,7 @@ impl ConsensusStorage {
block_window_cache_for_difficulty,
block_window_cache_for_past_median_time,
lkg_virtual_state,
good_finality_point_store,
})
}
}
49 changes: 49 additions & 0 deletions consensus/src/model/stores/good_finality_point.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
use kaspa_database::prelude::StoreResult;
use kaspa_database::prelude::DB;
use kaspa_database::prelude::{BatchDbWriter, CachedDbItem, DirectDbWriter};
use kaspa_database::registry::DatabaseStorePrefixes;
use kaspa_hashes::Hash;
use rocksdb::WriteBatch;
use std::sync::Arc;

/// Reader API for `GoodFinalityPointStore`.
pub trait GoodFinalityPointStoreReader {
    /// Returns the currently stored good finality point hash.
    fn get(&self) -> StoreResult<Hash>;
}

/// Write API for `GoodFinalityPointStore`. Extends the reader API with the
/// ability to overwrite the stored good finality point.
pub trait GoodFinalityPointStore: GoodFinalityPointStoreReader {
    /// Stores `hash` as the new good finality point.
    fn set(&mut self, hash: Hash) -> StoreResult<()>;
}

/// A DB + cache implementation of `GoodFinalityPointStore` trait
#[derive(Clone)]
pub struct DbGoodFinalityPointStore {
    // Handle to the underlying database (kept so direct writes can target it).
    db: Arc<DB>,
    // Cached single-item accessor, keyed under the `GoodFinalityPoint` prefix
    // (see `new`).
    access: CachedDbItem<Hash>,
}

impl DbGoodFinalityPointStore {
    /// Creates a store over `db`, caching the single item under the
    /// `GoodFinalityPoint` database prefix.
    pub fn new(db: Arc<DB>) -> Self {
        let access = CachedDbItem::new(Arc::clone(&db), DatabaseStorePrefixes::GoodFinalityPoint.into());
        Self { db, access }
    }

    /// Returns a new store over the same database but with a freshly
    /// initialized (empty) cache.
    pub fn clone_with_new_cache(&self) -> Self {
        Self::new(Arc::clone(&self.db))
    }

    /// Writes `hash` as part of `batch`, so the update is committed
    /// atomically together with the rest of the batch.
    pub fn set_batch(&mut self, batch: &mut WriteBatch, hash: Hash) -> StoreResult<()> {
        let writer = BatchDbWriter::new(batch);
        self.access.write(writer, &hash)
    }
}

impl GoodFinalityPointStoreReader for DbGoodFinalityPointStore {
    fn get(&self) -> StoreResult<Hash> {
        // Delegates to the cached item accessor; read-through behavior is
        // determined by `CachedDbItem`.
        self.access.read()
    }
}

impl GoodFinalityPointStore for DbGoodFinalityPointStore {
    fn set(&mut self, hash: Hash) -> StoreResult<()> {
        // Immediate (non-batched) write straight to the underlying DB,
        // as opposed to `set_batch` which defers to a WriteBatch commit.
        let writer = DirectDbWriter::new(&self.db);
        self.access.write(writer, &hash)
    }
}
1 change: 1 addition & 0 deletions consensus/src/model/stores/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ pub mod children;
pub mod daa;
pub mod depth;
pub mod ghostdag;
pub mod good_finality_point;
pub mod headers;
pub mod headers_selected_tip;
pub mod past_pruning_points;
Expand Down
70 changes: 46 additions & 24 deletions consensus/src/pipeline/virtual_processor/processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ use crate::{
daa::DbDaaStore,
depth::{DbDepthStore, DepthStoreReader},
ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader},
good_finality_point::{DbGoodFinalityPointStore, GoodFinalityPointStore, GoodFinalityPointStoreReader},
headers::{DbHeadersStore, HeaderStoreReader},
past_pruning_points::DbPastPruningPointsStore,
pruning::{DbPruningStore, PruningStoreReader},
Expand Down Expand Up @@ -113,6 +114,8 @@ pub struct VirtualStateProcessor {
pub(super) max_block_parents: u8,
pub(super) mergeset_size_limit: u64,
pub(super) pruning_depth: u64,
consensus_creation_time: u64,
finality_duration: u64,

// Stores
pub(super) statuses_store: Arc<RwLock<DbStatusesStore>>,
Expand All @@ -133,6 +136,8 @@ pub struct VirtualStateProcessor {
pub(super) virtual_stores: Arc<RwLock<VirtualStores>>,
pub(super) pruning_utxoset_stores: Arc<RwLock<PruningUtxosetStores>>,

pub(super) good_finality_point_store: Arc<RwLock<DbGoodFinalityPointStore>>,

/// The "last known good" virtual state. To be used by any logic which does not want to wait
/// for a possible virtual state write to complete but can rather settle with the last known state
pub lkg_virtual_state: LkgVirtualState,
Expand Down Expand Up @@ -176,6 +181,7 @@ impl VirtualStateProcessor {
pruning_lock: SessionLock,
notification_root: Arc<ConsensusNotificationRoot>,
counters: Arc<ProcessingCounters>,
consensus_creation_time: u64,
) -> Self {
Self {
receiver,
Expand Down Expand Up @@ -205,6 +211,7 @@ impl VirtualStateProcessor {
virtual_stores: storage.virtual_stores.clone(),
pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(),
lkg_virtual_state: storage.lkg_virtual_state.clone(),
good_finality_point_store: storage.good_finality_point_store.clone(),

ghostdag_manager: services.ghostdag_primary_manager.clone(),
reachability_service: services.reachability_service.clone(),
Expand All @@ -221,6 +228,8 @@ impl VirtualStateProcessor {
notification_root,
counters,
storage_mass_activation_daa_score: params.storage_mass_activation_daa_score,
consensus_creation_time,
finality_duration: params.finality_duration(),
}
}

Expand Down Expand Up @@ -1027,6 +1036,7 @@ impl VirtualStateProcessor {
pruning_point_write.set_history_root(&mut batch, self.genesis.hash).unwrap();
pruning_utxoset_write.set_utxoset_position(&mut batch, self.genesis.hash).unwrap();
self.db.write(batch).unwrap();
self.good_finality_point_store.write().set(self.genesis.hash).unwrap(); // TODO: Wrong lock behavior?
drop(pruning_point_write);
drop(pruning_utxoset_write);
}
Expand Down Expand Up @@ -1132,33 +1142,45 @@ impl VirtualStateProcessor {
Ok(())
}

pub fn are_pruning_points_violating_finality(&self, pp_list: PruningPointsList) -> bool {
// Ideally we would want to check if the last known pruning point has the finality point
// in its chain, but in some cases it's impossible: let `lkp` be the last known pruning
// point from the list, and `fup` be the first unknown pruning point (the one following `lkp`).
// fup.blue_score - lkp.blue_score ≈ finality_depth (±k), so it's possible for `lkp` not to
// have the finality point in its past. So we have no choice but to check if `lkp`
// has `finality_point.finality_point` in its chain (in the worst case `fup` is one block
// above the current finality point, and in this case `lkp` will be a few blocks above the
// finality_point.finality_point), meaning this function can only detect finality violations
// in depth of 2*finality_depth, and can give false negatives for smaller finality violations.
let current_pp = self.pruning_point_store.read().pruning_point().unwrap();
let vf = self.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, current_pp);
let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp);

let last_known_pp = pp_list.iter().rev().find(|pp| match self.statuses_store.read().get(pp.hash).unwrap_option() {
Some(status) => status.is_valid(),
None => false,
});

if let Some(last_known_pp) = last_known_pp {
!self.reachability_service.is_chain_ancestor_of(vff, last_known_pp.hash)
pub fn next_good_finality_point(&self, pp_list: PruningPointsList) -> PruningImportResult<Hash> {
if self.is_consensus_mature() {
// TODO: Fix comment
// Ideally we would want to check if the last known pruning point has the finality point
// in its chain, but in some cases it's impossible: let `lkp` be the last known pruning
// point from the list, and `fup` be the first unknown pruning point (the one following `lkp`).
// fup.blue_score - lkp.blue_score ≈ finality_depth (±k), so it's possible for `lkp` not to
// have the finality point in its past. So we have no choice but to check if `lkp`
// has `finality_point.finality_point` in its chain (in the worst case `fup` is one block
// above the current finality point, and in this case `lkp` will be a few blocks above the
// finality_point.finality_point), meaning this function can only detect finality violations
// in depth of 2*finality_depth, and can give false negatives for smaller finality violations.
let current_pp = self.pruning_point_store.read().pruning_point().unwrap();
let vf = self.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, current_pp);
let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp);

let first_pp_in_future_of_vff = pp_list
.iter()
.map(|pp| pp.hash)
.filter(|pp| match self.statuses_store.read().get(*pp).unwrap_option() {
Some(status) => status.is_valid(),
None => false,
})
.find(|pp| self.reachability_service.is_chain_ancestor_of(vff, *pp));

first_pp_in_future_of_vff.ok_or(PruningImportError::PruningPointListViolatesFinality)
} else {
// If no pruning point is known, there's definitely a finality violation
// (normally at least genesis should be known).
true
let good_finality_point = self.good_finality_point_store.read().get().unwrap();
if pp_list.iter().map(|h| h.hash).contains(&good_finality_point) {
Ok(good_finality_point)
} else {
Err(PruningImportError::PruningPointListViolatesFinality)
}
}
}

/// Returns true once this consensus instance has existed for longer than one
/// finality duration, i.e. it is considered mature enough to rely on its own
/// virtual finality point rather than the stored good finality point.
pub fn is_consensus_mature(&self) -> bool {
    // saturating_sub guards against clock skew: if the system clock currently
    // reads earlier than the recorded creation time, a plain `u64` subtraction
    // would panic in debug builds or wrap to a huge value in release builds,
    // spuriously reporting the consensus as mature.
    unix_now().saturating_sub(self.consensus_creation_time) > self.finality_duration
}
}

enum MergesetIncreaseResult {
Expand Down
11 changes: 10 additions & 1 deletion consensus/src/processes/pruning_proof/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ use crate::{
stores::{
depth::DbDepthStore,
ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader},
good_finality_point::{DbGoodFinalityPointStore, GoodFinalityPointStore},
headers::{DbHeadersStore, HeaderStore, HeaderStoreReader},
headers_selected_tip::DbHeadersSelectedTipStore,
past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore},
Expand Down Expand Up @@ -104,6 +105,7 @@ pub struct PruningProofManager {
headers_selected_tip_store: Arc<RwLock<DbHeadersSelectedTipStore>>,
depth_store: Arc<DbDepthStore>,
selected_chain_store: Arc<RwLock<DbSelectedChainStore>>,
good_finality_point_store: Arc<RwLock<DbGoodFinalityPointStore>>,

ghostdag_managers: Arc<Vec<DbGhostdagManager>>,
traversal_manager: DbDagTraversalManager,
Expand Down Expand Up @@ -154,6 +156,7 @@ impl PruningProofManager {
headers_selected_tip_store: storage.headers_selected_tip_store.clone(),
selected_chain_store: storage.selected_chain_store.clone(),
depth_store: storage.depth_store.clone(),
good_finality_point_store: storage.good_finality_point_store.clone(),

ghostdag_managers,
traversal_manager,
Expand Down Expand Up @@ -199,7 +202,13 @@ impl PruningProofManager {
drop(pruning_point_write);
}

pub fn apply_proof(&self, mut proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> {
pub fn apply_proof(
&self,
mut proof: PruningPointProof,
trusted_set: &[TrustedBlock],
good_finality_point: Hash,
) -> PruningImportResult<()> {
self.good_finality_point_store.write().set(good_finality_point).unwrap();
let pruning_point_header = proof[0].last().unwrap().clone();
let pruning_point = pruning_point_header.hash;

Expand Down
2 changes: 2 additions & 0 deletions database/src/registry.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,8 @@ pub enum DatabaseStorePrefixes {
ReachabilityTreeChildren = 30,
ReachabilityFutureCoveringSet = 31,

GoodFinalityPoint = 32,

// ---- Metadata ----
MultiConsensusMetadata = 124,
ConsensusEntries = 125,
Expand Down
Loading

0 comments on commit 2c7dad3

Please sign in to comment.