Commit

Prevent collation fetch reputation drop
Crystalin committed Dec 28, 2021
1 parent 0889d36 commit dfd86c0
Showing 2 changed files with 2 additions and 5 deletions.
5 changes: 1 addition & 4 deletions node/network/collator-protocol/src/validator_side/mod.rs
@@ -1202,7 +1202,7 @@ async fn poll_requests(
 	span_per_relay_parent: &HashMap<Hash, PerLeafSpan>,
 ) -> Vec<(PeerId, Rep)> {
 	let mut retained_requested = HashSet::new();
-	let mut reputation_changes = Vec::new();
+	let reputation_changes = Vec::new();
 	for (pending_collation, per_req) in requested_collations.iter_mut() {
 		// Despite the await, this won't block on the response itself.
 		let result =
@@ -1212,9 +1212,6 @@
 		if !result.is_ready() {
 			retained_requested.insert(pending_collation.clone());
 		}
-		if let CollationFetchResult::Error(rep) = result {
-			reputation_changes.push((pending_collation.peer_id.clone(), rep));
-		}
 	}
 	requested_collations.retain(|k, _| retained_requested.contains(k));
 	reputation_changes
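
With the error branch removed, poll_requests still tracks pending requests but always returns an empty vector, so failed collation fetches no longer translate into reputation penalties. The standalone sketch below (stand-in PeerId/Rep types, not the real polkadot network types) illustrates the before/after behavior:

// Minimal, self-contained illustration of this commit's effect; the types
// below are simple stand-ins, not the actual polkadot network types.
type PeerId = u64;
type Rep = i32;

// Upstream behavior: each failed fetch is reported so the caller can lower
// the offending collator's reputation.
fn poll_requests_upstream(failed_fetches: &[(PeerId, Rep)]) -> Vec<(PeerId, Rep)> {
    failed_fetches.to_vec()
}

// Behavior after this commit: the error branch is gone, so the returned
// vector is always empty and no penalties are ever applied.
fn poll_requests_patched(_failed_fetches: &[(PeerId, Rep)]) -> Vec<(PeerId, Rep)> {
    Vec::new()
}

fn apply_reputation_changes(label: &str, changes: Vec<(PeerId, Rep)>) {
    println!("{}: {} reputation change(s)", label, changes.len());
    for (peer, cost) in changes {
        println!("  peer {} penalized by {}", peer, cost);
    }
}

fn main() {
    let failed_fetches = vec![(1, 50), (2, 50)];
    apply_reputation_changes("upstream", poll_requests_upstream(&failed_fetches));
    apply_reputation_changes("patched", poll_requests_patched(&failed_fetches));
}

Running it prints two penalties for the upstream variant and none for the patched one.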
2 changes: 1 addition & 1 deletion node/network/protocol/src/request_response/mod.rs
@@ -95,7 +95,7 @@ pub const CHUNK_REQUEST_TIMEOUT: Duration = DEFAULT_REQUEST_TIMEOUT_CONNECTED;
 /// This timeout is based on what seems sensible from a time budget perspective, considering 6
 /// second block time. This is going to be tough, if we have multiple forks and large PoVs, but we
 /// only have so much time.
-const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(1000);
+const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(1500);
 
 /// We want timeout statement requests fast, so we don't waste time on slow nodes. Responders will
 /// try their best to either serve within that timeout or return an error immediately. (We need to
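
The second change trades retry opportunities for more time per fetch within the fixed block-time budget described in the doc comment. A standalone back-of-the-envelope check (not code from this repository) of how many sequential attempts fit into a 6 second block:

use std::time::Duration;

// Illustration of the time budget mentioned in the doc comment: how many
// back-to-back PoV request attempts fit into one 6 second block for each
// timeout value.
fn main() {
    let block_time = Duration::from_secs(6);
    for timeout_ms in [1000u64, 1500] {
        let timeout = Duration::from_millis(timeout_ms);
        let max_attempts = block_time.as_millis() / timeout.as_millis();
        println!("timeout {} ms -> at most {} sequential attempts per block", timeout_ms, max_attempts);
    }
}

At 1000 ms the budget allows up to 6 back-to-back attempts; at 1500 ms it allows 4, each with more time to complete.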
