Skip to content

Commit

Permalink
Various miscellaneous changes towards 0.15.1 RC2 (kaspanet#543)
Browse files Browse the repository at this point in the history
* infrequent logs should be debug

* cleanup some todos

* when a network starts, genesis has a body, so there's no need for a special exception

* remove unneeded method and add an error just in case it is added in the future

* count and log chain disqualified blocks

* count and log mempool evictions

* bump version to 0.14.5
  • Loading branch information
michaelsutton authored Sep 5, 2024
1 parent 7cdabb4 commit f866dfa
Show file tree
Hide file tree
Showing 15 changed files with 190 additions and 169 deletions.
114 changes: 57 additions & 57 deletions Cargo.lock

Large diffs are not rendered by default.

112 changes: 56 additions & 56 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ members = [

[workspace.package]
rust-version = "1.80.0"
version = "0.14.3"
version = "0.14.5"
authors = ["Kaspa developers"]
license = "ISC"
repository = "https://github.com/kaspanet/rusty-kaspa"
Expand All @@ -79,61 +79,61 @@ include = [
]

[workspace.dependencies]
# kaspa-testing-integration = { version = "0.14.3", path = "testing/integration" }
kaspa-addresses = { version = "0.14.3", path = "crypto/addresses" }
kaspa-addressmanager = { version = "0.14.3", path = "components/addressmanager" }
kaspa-bip32 = { version = "0.14.3", path = "wallet/bip32" }
kaspa-cli = { version = "0.14.3", path = "cli" }
kaspa-connectionmanager = { version = "0.14.3", path = "components/connectionmanager" }
kaspa-consensus = { version = "0.14.3", path = "consensus" }
kaspa-consensus-core = { version = "0.14.3", path = "consensus/core" }
kaspa-consensus-client = { version = "0.14.3", path = "consensus/client" }
kaspa-consensus-notify = { version = "0.14.3", path = "consensus/notify" }
kaspa-consensus-wasm = { version = "0.14.3", path = "consensus/wasm" }
kaspa-consensusmanager = { version = "0.14.3", path = "components/consensusmanager" }
kaspa-core = { version = "0.14.3", path = "core" }
kaspa-daemon = { version = "0.14.3", path = "daemon" }
kaspa-database = { version = "0.14.3", path = "database" }
kaspa-grpc-client = { version = "0.14.3", path = "rpc/grpc/client" }
kaspa-grpc-core = { version = "0.14.3", path = "rpc/grpc/core" }
kaspa-grpc-server = { version = "0.14.3", path = "rpc/grpc/server" }
kaspa-hashes = { version = "0.14.3", path = "crypto/hashes" }
kaspa-index-core = { version = "0.14.3", path = "indexes/core" }
kaspa-index-processor = { version = "0.14.3", path = "indexes/processor" }
kaspa-math = { version = "0.14.3", path = "math" }
kaspa-merkle = { version = "0.14.3", path = "crypto/merkle" }
kaspa-metrics-core = { version = "0.14.3", path = "metrics/core" }
kaspa-mining = { version = "0.14.3", path = "mining" }
kaspa-mining-errors = { version = "0.14.3", path = "mining/errors" }
kaspa-muhash = { version = "0.14.3", path = "crypto/muhash" }
kaspa-notify = { version = "0.14.3", path = "notify" }
kaspa-p2p-flows = { version = "0.14.3", path = "protocol/flows" }
kaspa-p2p-lib = { version = "0.14.3", path = "protocol/p2p" }
kaspa-perf-monitor = { version = "0.14.3", path = "metrics/perf_monitor" }
kaspa-pow = { version = "0.14.3", path = "consensus/pow" }
kaspa-rpc-core = { version = "0.14.3", path = "rpc/core" }
kaspa-rpc-macros = { version = "0.14.3", path = "rpc/macros" }
kaspa-rpc-service = { version = "0.14.3", path = "rpc/service" }
kaspa-txscript = { version = "0.14.3", path = "crypto/txscript" }
kaspa-txscript-errors = { version = "0.14.3", path = "crypto/txscript/errors" }
kaspa-utils = { version = "0.14.3", path = "utils" }
kaspa-utils-tower = { version = "0.14.3", path = "utils/tower" }
kaspa-utxoindex = { version = "0.14.3", path = "indexes/utxoindex" }
kaspa-wallet = { version = "0.14.3", path = "wallet/native" }
kaspa-wallet-cli-wasm = { version = "0.14.3", path = "wallet/wasm" }
kaspa-wallet-keys = { version = "0.14.3", path = "wallet/keys" }
kaspa-wallet-pskt = { version = "0.14.3", path = "wallet/pskt" }
kaspa-wallet-core = { version = "0.14.3", path = "wallet/core" }
kaspa-wallet-macros = { version = "0.14.3", path = "wallet/macros" }
kaspa-wasm = { version = "0.14.3", path = "wasm" }
kaspa-wasm-core = { version = "0.14.3", path = "wasm/core" }
kaspa-wrpc-client = { version = "0.14.3", path = "rpc/wrpc/client" }
kaspa-wrpc-proxy = { version = "0.14.3", path = "rpc/wrpc/proxy" }
kaspa-wrpc-server = { version = "0.14.3", path = "rpc/wrpc/server" }
kaspa-wrpc-wasm = { version = "0.14.3", path = "rpc/wrpc/wasm" }
kaspa-wrpc-example-subscriber = { version = "0.14.3", path = "rpc/wrpc/examples/subscriber" }
kaspad = { version = "0.14.3", path = "kaspad" }
kaspa-alloc = { version = "0.14.3", path = "utils/alloc" }
# kaspa-testing-integration = { version = "0.14.5", path = "testing/integration" }
kaspa-addresses = { version = "0.14.5", path = "crypto/addresses" }
kaspa-addressmanager = { version = "0.14.5", path = "components/addressmanager" }
kaspa-bip32 = { version = "0.14.5", path = "wallet/bip32" }
kaspa-cli = { version = "0.14.5", path = "cli" }
kaspa-connectionmanager = { version = "0.14.5", path = "components/connectionmanager" }
kaspa-consensus = { version = "0.14.5", path = "consensus" }
kaspa-consensus-core = { version = "0.14.5", path = "consensus/core" }
kaspa-consensus-client = { version = "0.14.5", path = "consensus/client" }
kaspa-consensus-notify = { version = "0.14.5", path = "consensus/notify" }
kaspa-consensus-wasm = { version = "0.14.5", path = "consensus/wasm" }
kaspa-consensusmanager = { version = "0.14.5", path = "components/consensusmanager" }
kaspa-core = { version = "0.14.5", path = "core" }
kaspa-daemon = { version = "0.14.5", path = "daemon" }
kaspa-database = { version = "0.14.5", path = "database" }
kaspa-grpc-client = { version = "0.14.5", path = "rpc/grpc/client" }
kaspa-grpc-core = { version = "0.14.5", path = "rpc/grpc/core" }
kaspa-grpc-server = { version = "0.14.5", path = "rpc/grpc/server" }
kaspa-hashes = { version = "0.14.5", path = "crypto/hashes" }
kaspa-index-core = { version = "0.14.5", path = "indexes/core" }
kaspa-index-processor = { version = "0.14.5", path = "indexes/processor" }
kaspa-math = { version = "0.14.5", path = "math" }
kaspa-merkle = { version = "0.14.5", path = "crypto/merkle" }
kaspa-metrics-core = { version = "0.14.5", path = "metrics/core" }
kaspa-mining = { version = "0.14.5", path = "mining" }
kaspa-mining-errors = { version = "0.14.5", path = "mining/errors" }
kaspa-muhash = { version = "0.14.5", path = "crypto/muhash" }
kaspa-notify = { version = "0.14.5", path = "notify" }
kaspa-p2p-flows = { version = "0.14.5", path = "protocol/flows" }
kaspa-p2p-lib = { version = "0.14.5", path = "protocol/p2p" }
kaspa-perf-monitor = { version = "0.14.5", path = "metrics/perf_monitor" }
kaspa-pow = { version = "0.14.5", path = "consensus/pow" }
kaspa-rpc-core = { version = "0.14.5", path = "rpc/core" }
kaspa-rpc-macros = { version = "0.14.5", path = "rpc/macros" }
kaspa-rpc-service = { version = "0.14.5", path = "rpc/service" }
kaspa-txscript = { version = "0.14.5", path = "crypto/txscript" }
kaspa-txscript-errors = { version = "0.14.5", path = "crypto/txscript/errors" }
kaspa-utils = { version = "0.14.5", path = "utils" }
kaspa-utils-tower = { version = "0.14.5", path = "utils/tower" }
kaspa-utxoindex = { version = "0.14.5", path = "indexes/utxoindex" }
kaspa-wallet = { version = "0.14.5", path = "wallet/native" }
kaspa-wallet-cli-wasm = { version = "0.14.5", path = "wallet/wasm" }
kaspa-wallet-keys = { version = "0.14.5", path = "wallet/keys" }
kaspa-wallet-pskt = { version = "0.14.5", path = "wallet/pskt" }
kaspa-wallet-core = { version = "0.14.5", path = "wallet/core" }
kaspa-wallet-macros = { version = "0.14.5", path = "wallet/macros" }
kaspa-wasm = { version = "0.14.5", path = "wasm" }
kaspa-wasm-core = { version = "0.14.5", path = "wasm/core" }
kaspa-wrpc-client = { version = "0.14.5", path = "rpc/wrpc/client" }
kaspa-wrpc-proxy = { version = "0.14.5", path = "rpc/wrpc/proxy" }
kaspa-wrpc-server = { version = "0.14.5", path = "rpc/wrpc/server" }
kaspa-wrpc-wasm = { version = "0.14.5", path = "rpc/wrpc/wasm" }
kaspa-wrpc-example-subscriber = { version = "0.14.5", path = "rpc/wrpc/examples/subscriber" }
kaspad = { version = "0.14.5", path = "kaspad" }
kaspa-alloc = { version = "0.14.5", path = "utils/alloc" }

# external
aes = "0.8.3"
Expand Down
4 changes: 4 additions & 0 deletions consensus/core/src/api/counters.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ pub struct ProcessingCounters {
pub body_counts: AtomicU64,
pub txs_counts: AtomicU64,
pub chain_block_counts: AtomicU64,
pub chain_disqualified_counts: AtomicU64,
pub mass_counts: AtomicU64,
}

Expand All @@ -22,6 +23,7 @@ impl ProcessingCounters {
body_counts: self.body_counts.load(Ordering::Relaxed),
txs_counts: self.txs_counts.load(Ordering::Relaxed),
chain_block_counts: self.chain_block_counts.load(Ordering::Relaxed),
chain_disqualified_counts: self.chain_disqualified_counts.load(Ordering::Relaxed),
mass_counts: self.mass_counts.load(Ordering::Relaxed),
}
}
Expand All @@ -36,6 +38,7 @@ pub struct ProcessingCountersSnapshot {
pub body_counts: u64,
pub txs_counts: u64,
pub chain_block_counts: u64,
pub chain_disqualified_counts: u64,
pub mass_counts: u64,
}

Expand All @@ -51,6 +54,7 @@ impl core::ops::Sub for &ProcessingCountersSnapshot {
body_counts: self.body_counts.saturating_sub(rhs.body_counts),
txs_counts: self.txs_counts.saturating_sub(rhs.txs_counts),
chain_block_counts: self.chain_block_counts.saturating_sub(rhs.chain_block_counts),
chain_disqualified_counts: self.chain_disqualified_counts.saturating_sub(rhs.chain_disqualified_counts),
mass_counts: self.mass_counts.saturating_sub(rhs.mass_counts),
}
}
Expand Down
4 changes: 4 additions & 0 deletions consensus/core/src/errors/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,10 @@ pub enum RuleError {

#[error("DAA window data has only {0} entries")]
InsufficientDaaWindowSize(usize),

/// Currently this error is never created because it is impossible to submit such a block
#[error("cannot add block body to a pruned block")]
PrunedBlock,
}

pub type BlockProcessResult<T> = std::result::Result<T, RuleError>;
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,7 @@ impl BlockBodyProcessor {
pub fn validate_body_in_context(self: &Arc<Self>, block: &Block) -> BlockProcessResult<()> {
self.check_parent_bodies_exist(block)?;
self.check_coinbase_blue_score_and_subsidy(block)?;
self.check_block_transactions_in_context(block)?;
self.check_block_is_not_pruned(block)
}

fn check_block_is_not_pruned(self: &Arc<Self>, _block: &Block) -> BlockProcessResult<()> {
// TODO: In kaspad code it checks that the block is not in the past of the current tips.
// We should decide what's the best indication that a block was pruned.
Ok(())
self.check_block_transactions_in_context(block)
}

fn check_block_transactions_in_context(self: &Arc<Self>, block: &Block) -> BlockProcessResult<()> {
Expand All @@ -36,12 +29,6 @@ impl BlockBodyProcessor {
}

fn check_parent_bodies_exist(self: &Arc<Self>, block: &Block) -> BlockProcessResult<()> {
// TODO: Skip this check for blocks in PP anticone that comes as part of the pruning proof.

if block.header.direct_parents().len() == 1 && block.header.direct_parents()[0] == self.genesis.hash {
return Ok(());
}

let statuses_read_guard = self.statuses_store.read();
let missing: Vec<Hash> = block
.header
Expand Down
4 changes: 1 addition & 3 deletions consensus/src/pipeline/body_processor/processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -201,8 +201,7 @@ impl BlockBodyProcessor {
// transactions that fits the merkle root.
// PrunedBlock - PrunedBlock is an error that rejects a block body and
// not the block as a whole, so we shouldn't mark it as invalid.
// TODO: implement the last part.
if !matches!(e, RuleError::BadMerkleRoot(_, _) | RuleError::MissingParents(_)) {
if !matches!(e, RuleError::BadMerkleRoot(_, _) | RuleError::MissingParents(_) | RuleError::PrunedBlock) {
self.statuses_store.write().set(block.hash(), BlockStatus::StatusInvalid).unwrap();
}
return Err(e);
Expand All @@ -226,7 +225,6 @@ impl BlockBodyProcessor {
fn validate_body(self: &Arc<BlockBodyProcessor>, block: &Block, is_trusted: bool) -> BlockProcessResult<u64> {
let mass = self.validate_body_in_isolation(block)?;
if !is_trusted {
// TODO: Check that it's safe to skip this check if the block is trusted.
self.validate_body_in_context(block)?;
}
Ok(mass)
Expand Down
3 changes: 0 additions & 3 deletions consensus/src/pipeline/header_processor/processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -308,8 +308,6 @@ impl HeaderProcessor {

// Runs partial header validation for trusted blocks (currently validates only header-in-isolation and computes GHOSTDAG).
fn validate_trusted_header(&self, header: &Arc<Header>) -> BlockProcessResult<HeaderProcessingContext> {
// TODO: For now we skip most validations for trusted blocks, but in the future we should
// employ some validations to avoid spam etc.
let block_level = self.validate_header_in_isolation(header)?;
let mut ctx = self.build_processing_context(header, block_level);
self.ghostdag(&mut ctx);
Expand Down Expand Up @@ -407,7 +405,6 @@ impl HeaderProcessor {
&& reachability::is_chain_ancestor_of(&staging, pp, ctx.hash).unwrap()
{
// Hint reachability about the new tip.
// TODO: identify a disqualified hst and make sure to use sink instead
reachability::hint_virtual_selected_parent(&mut staging, ctx.hash).unwrap();
hst_write.set_batch(&mut batch, SortableBlock::new(ctx.hash, header.blue_work)).unwrap();
}
Expand Down
9 changes: 8 additions & 1 deletion consensus/src/pipeline/monitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use kaspa_core::{
service::{AsyncService, AsyncServiceFuture},
tick::{TickReason, TickService},
},
trace,
trace, warn,
};
use std::{
sync::Arc,
Expand Down Expand Up @@ -62,6 +62,13 @@ impl ConsensusMonitor {
if delta.body_counts != 0 { delta.mass_counts as f64 / delta.body_counts as f64 } else{ 0f64 },
);

if delta.chain_disqualified_counts > 0 {
warn!(
"Consensus detected UTXO-invalid blocks which are disqualified from the virtual selected chain (possibly due to inheritance): {} disqualified vs. {} valid chain blocks",
delta.chain_disqualified_counts, delta.chain_block_counts
);
}

last_snapshot = snapshot;
last_log_time = now;
}
Expand Down
16 changes: 11 additions & 5 deletions consensus/src/pipeline/virtual_processor/processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -383,10 +383,12 @@ impl VirtualStateProcessor {

// Walk back up to the new virtual selected parent candidate
let mut chain_block_counter = 0;
let mut chain_disqualified_counter = 0;
for (selected_parent, current) in self.reachability_service.forward_chain_iterator(split_point, to, true).tuple_windows() {
if selected_parent != diff_point {
// This indicates that the selected parent is disqualified, propagate up and continue
self.statuses_store.write().set(current, StatusDisqualifiedFromChain).unwrap();
chain_disqualified_counter += 1;
continue;
}

Expand Down Expand Up @@ -416,6 +418,7 @@ impl VirtualStateProcessor {
if let Err(rule_error) = res {
info!("Block {} is disqualified from virtual chain: {}", current, rule_error);
self.statuses_store.write().set(current, StatusDisqualifiedFromChain).unwrap();
chain_disqualified_counter += 1;
} else {
debug!("VIRTUAL PROCESSOR, UTXO validated for {current}");

Expand All @@ -434,6 +437,9 @@ impl VirtualStateProcessor {
}
// Report counters
self.counters.chain_block_counts.fetch_add(chain_block_counter, Ordering::Relaxed);
if chain_disqualified_counter > 0 {
self.counters.chain_disqualified_counts.fetch_add(chain_disqualified_counter, Ordering::Relaxed);
}

diff_point
}
Expand Down Expand Up @@ -559,7 +565,7 @@ impl VirtualStateProcessor {
finality_point: Hash,
pruning_point: Hash,
) -> (Hash, VecDeque<Hash>) {
// TODO: tests
// TODO (relaxed): additional tests

let mut heap = tips
.into_iter()
Expand Down Expand Up @@ -621,7 +627,7 @@ impl VirtualStateProcessor {
mut candidates: VecDeque<Hash>,
pruning_point: Hash,
) -> (Vec<Hash>, GhostdagData) {
// TODO: tests
// TODO (relaxed): additional tests

// Mergeset increasing might traverse DAG areas which are below the finality point and which theoretically
// can borderline with pruned data, hence we acquire the prune lock to ensure data consistency. Note that
Expand Down Expand Up @@ -670,7 +676,7 @@ impl VirtualStateProcessor {
MergesetIncreaseResult::Rejected { new_candidate } => {
// If we already have a candidate in the past of new candidate then skip.
if self.reachability_service.is_any_dag_ancestor(&mut candidates.iter().copied(), new_candidate) {
continue; // TODO: not sure this test is needed if candidates invariant as antichain is kept
continue; // TODO (optimization): not sure this check is needed if candidates invariant as antichain is kept
}
// Remove all candidates which are in the future of the new candidate
candidates.retain(|&h| !self.reachability_service.is_dag_ancestor_of(new_candidate, h));
Expand Down Expand Up @@ -860,7 +866,7 @@ impl VirtualStateProcessor {
build_mode: TemplateBuildMode,
) -> Result<BlockTemplate, RuleError> {
//
// TODO: tests
// TODO (relaxed): additional tests
//

// We call for the initial tx batch before acquiring the virtual read lock,
Expand Down Expand Up @@ -1048,7 +1054,7 @@ impl VirtualStateProcessor {
);
}

// TODO: rename to reflect finalizing pruning point utxoset state and importing *to* virtual utxoset
/// Finalizes the pruning point utxoset state and imports the pruning point utxoset *to* virtual utxoset
pub fn import_pruning_point_utxo_set(
&self,
new_pruning_point: Hash,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,6 @@ impl VirtualStateProcessor {
for i in 0..mutable_tx.tx.inputs.len() {
if mutable_tx.entries[i].is_some() {
// We prefer a previously populated entry if such exists
// TODO: consider re-checking the utxo view to get the most up-to-date entry (since DAA score can change)
continue;
}
if let Some(entry) = utxo_view.get(&mutable_tx.tx.inputs[i].previous_outpoint) {
Expand Down
4 changes: 2 additions & 2 deletions crypto/txscript/src/caches.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,8 +87,8 @@ impl core::ops::Sub for &TxScriptCacheCountersSnapshot {

fn sub(self, rhs: Self) -> Self::Output {
Self::Output {
insert_counts: self.insert_counts.checked_sub(rhs.insert_counts).unwrap_or_default(),
get_counts: self.get_counts.checked_sub(rhs.get_counts).unwrap_or_default(),
insert_counts: self.insert_counts.saturating_sub(rhs.insert_counts),
get_counts: self.get_counts.saturating_sub(rhs.get_counts),
}
}
}
8 changes: 4 additions & 4 deletions kaspad/src/daemon.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ use kaspa_consensus_core::{
errors::config::{ConfigError, ConfigResult},
};
use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService};
use kaspa_core::{core::Core, info, trace};
use kaspa_core::{core::Core, debug, info};
use kaspa_core::{kaspad_env::version, task::tick::TickService};
use kaspa_database::prelude::CachePolicy;
use kaspa_grpc_server::service::GrpcService;
Expand Down Expand Up @@ -400,10 +400,10 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm
.with_tick_service(tick_service.clone());
let perf_monitor = if args.perf_metrics {
let cb = move |counters: CountersSnapshot| {
trace!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_process_metrics_display());
trace!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_io_metrics_display());
debug!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_process_metrics_display());
debug!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_io_metrics_display());
#[cfg(feature = "heap")]
trace!("[{}] heap stats: {:?}", kaspa_perf_monitor::SERVICE_NAME, dhat::HeapStats::get());
debug!("[{}] heap stats: {:?}", kaspa_perf_monitor::SERVICE_NAME, dhat::HeapStats::get());
};
Arc::new(perf_monitor_builder.with_fetch_cb(cb).build())
} else {
Expand Down
Loading

0 comments on commit f866dfa

Please sign in to comment.