This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Commit baf1365

fmt
slumber committed May 24, 2023
1 parent 5ad5706 commit baf1365
Showing 4 changed files with 62 additions and 66 deletions.
101 changes: 50 additions & 51 deletions node/core/dispute-coordinator/src/initialized.rs
@@ -214,62 +214,61 @@ impl Initialized {
             gum::trace!(target: LOG_TARGET, "Waiting for message");
             let mut overlay_db = OverlayedBackend::new(backend);
             let default_confirm = Box::new(|| Ok(()));
-            let confirm_write = match MuxedMessage::receive(ctx, &mut self.participation_receiver)
-                .await?
-            {
-                MuxedMessage::Participation(msg) => {
-                    gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation");
-                    let ParticipationStatement {
-                        session,
-                        candidate_hash,
-                        candidate_receipt,
-                        outcome,
-                    } = self.participation.get_participation_result(ctx, msg).await?;
-                    if let Some(valid) = outcome.validity() {
-                        gum::trace!(
-                            target: LOG_TARGET,
-                            ?session,
-                            ?candidate_hash,
-                            ?valid,
-                            "Issuing local statement based on participation outcome."
-                        );
-                        self.issue_local_statement(
-                            ctx,
-                            &mut overlay_db,
-                            candidate_hash,
-                            candidate_receipt,
-                            session,
-                            valid,
-                            clock.now(),
-                        )
-                        .await?;
-                    } else {
-                        gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed");
-                    }
-                    default_confirm
-                },
-                MuxedMessage::Subsystem(msg) => match msg {
-                    FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()),
-                    FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
-                        gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves");
-                        self.process_active_leaves_update(
-                            ctx,
-                            &mut overlay_db,
-                            update,
-                            clock.now(),
-                        )
-                        .await?;
-                        default_confirm
-                    },
-                    FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => {
-                        gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized");
-                        self.scraper.process_finalized_block(&n);
-                        default_confirm
-                    },
-                    FromOrchestra::Communication { msg } =>
-                        self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?,
-                },
-            };
+            let confirm_write =
+                match MuxedMessage::receive(ctx, &mut self.participation_receiver).await? {
+                    MuxedMessage::Participation(msg) => {
+                        gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation");
+                        let ParticipationStatement {
+                            session,
+                            candidate_hash,
+                            candidate_receipt,
+                            outcome,
+                        } = self.participation.get_participation_result(ctx, msg).await?;
+                        if let Some(valid) = outcome.validity() {
+                            gum::trace!(
+                                target: LOG_TARGET,
+                                ?session,
+                                ?candidate_hash,
+                                ?valid,
+                                "Issuing local statement based on participation outcome."
+                            );
+                            self.issue_local_statement(
+                                ctx,
+                                &mut overlay_db,
+                                candidate_hash,
+                                candidate_receipt,
+                                session,
+                                valid,
+                                clock.now(),
+                            )
+                            .await?;
+                        } else {
+                            gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed");
+                        }
+                        default_confirm
+                    },
+                    MuxedMessage::Subsystem(msg) => match msg {
+                        FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()),
+                        FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
+                            gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves");
+                            self.process_active_leaves_update(
+                                ctx,
+                                &mut overlay_db,
+                                update,
+                                clock.now(),
+                            )
+                            .await?;
+                            default_confirm
+                        },
+                        FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => {
+                            gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized");
+                            self.scraper.process_finalized_block(&n);
+                            default_confirm
+                        },
+                        FromOrchestra::Communication { msg } =>
+                            self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?,
+                    },
+                };
 
             if !overlay_db.is_empty() {
                 let ops = overlay_db.into_write_ops();
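
The reflow above is standard rustfmt output: once the scrutinee of a `match` no longer fits on the line with its `let` binding, rustfmt breaks after the `=` and indents the entire match one level, which is why every arm moved one indent deeper. A minimal, self-contained sketch of the same shape (hypothetical code, not from this repository):

    // Hypothetical example of the rewrap applied to `confirm_write` above:
    // the long scrutinee moves off the binding line, and `match ... {`
    // stays on a single line.
    fn classify(input: Result<u32, String>) -> &'static str {
        let label =
            match input.map(|n| n.saturating_mul(2)).map_err(|e| e.to_uppercase()) {
                Ok(n) if n > 10 => "large",
                Ok(_) => "small",
                Err(_) => "error",
            };
        label
    }
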
6 changes: 3 additions & 3 deletions node/service/src/lib.rs
@@ -840,18 +840,18 @@ where
     net_config.add_request_response_protocol(cfg);
     let (collation_req_v1_receiver, cfg) =
         IncomingRequest::get_config_receiver(&req_protocol_names);
-        net_config.add_request_response_protocol(cfg);
+    net_config.add_request_response_protocol(cfg);
     let (collation_req_vstaging_receiver, cfg) =
         IncomingRequest::get_config_receiver(&req_protocol_names);
-        net_config.add_request_response_protocol(cfg);
+    net_config.add_request_response_protocol(cfg);
     let (available_data_req_receiver, cfg) =
         IncomingRequest::get_config_receiver(&req_protocol_names);
     net_config.add_request_response_protocol(cfg);
     let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
     net_config.add_request_response_protocol(cfg);
     let (candidate_req_vstaging_receiver, cfg) =
         IncomingRequest::get_config_receiver(&req_protocol_names);
-        net_config.add_request_response_protocol(cfg);
+    net_config.add_request_response_protocol(cfg);
     let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
     net_config.add_request_response_protocol(cfg);
 

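
The three changed lines in this hunk are textually identical before and after; only the leading whitespace was normalized. The surrounding pattern registers one request-response protocol per receiver, with each `let` shadowing the previous `cfg` binding. A hedged sketch of that shape (the types and names below are invented stand-ins, not the real `IncomingRequest` or network-configuration API):

    // Invented stand-ins illustrating the register-and-shadow pattern:
    // each protocol yields a (receiver, config) pair, the config is
    // registered, and the next `let` shadows `cfg`.
    struct NetConfig {
        protocols: Vec<&'static str>,
    }

    impl NetConfig {
        fn add_request_response_protocol(&mut self, cfg: &'static str) {
            self.protocols.push(cfg);
        }
    }

    fn make_protocol(name: &'static str) -> ((), &'static str) {
        ((), name) // the unit stands in for the receiver half
    }

    fn main() {
        let mut net_config = NetConfig { protocols: Vec::new() };
        let (_collation_receiver, cfg) = make_protocol("/req/collation/1");
        net_config.add_request_response_protocol(cfg);
        let (_dispute_receiver, cfg) = make_protocol("/req/dispute/1"); // shadows `cfg`
        net_config.add_request_response_protocol(cfg);
        assert_eq!(net_config.protocols.len(), 2);
    }
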
16 changes: 6 additions & 10 deletions runtime/parachains/src/inclusion/mod.rs
@@ -21,7 +21,8 @@
 //! to included.
 
 use crate::{
-    configuration::{self, HostConfiguration}, disputes, dmp, hrmp, paras,
+    configuration::{self, HostConfiguration},
+    disputes, dmp, hrmp, paras,
     paras_inherent::DisputedBitfield,
     scheduler::{self, CoreAssignment},
     shared::{self, AllowedRelayParentsTracker},
@@ -995,9 +996,7 @@ impl<T: Config> Pallet<T> {
         ))
     }
 
-    pub(crate) fn relay_dispatch_queue_size(
-        para_id: ParaId,
-    ) -> (u32, u32) {
+    pub(crate) fn relay_dispatch_queue_size(para_id: ParaId) -> (u32, u32) {
         let fp = T::MessageQueue::footprint(AggregateMessageOrigin::Ump(UmpQueueId::Para(para_id)));
         (fp.count as u32, fp.size as u32)
     }
@@ -1023,9 +1022,7 @@ impl<T: Config> Pallet<T> {
 
         let (para_queue_count, mut para_queue_size) = Self::relay_dispatch_queue_size(para);
 
-        if para_queue_count.saturating_add(additional_msgs) >
-            config.max_upward_queue_count
-        {
+        if para_queue_count.saturating_add(additional_msgs) > config.max_upward_queue_count {
             return Err(UmpAcceptanceCheckErr::CapacityExceeded {
                 count: para_queue_count.saturating_add(additional_msgs).into(),
                 limit: config.max_upward_queue_count.into(),
@@ -1037,14 +1034,13 @@ impl<T: Config> Pallet<T> {
         if msg_size > config.max_upward_message_size {
             return Err(UmpAcceptanceCheckErr::MessageSize {
                 idx: idx as u32,
-                msg_size: msg_size,
+                msg_size,
                 max_size: config.max_upward_message_size,
             })
         }
         // make sure that the queue is not overfilled.
         // we do it here only once since returning false invalidates the whole relay-chain block.
-        if para_queue_size.saturating_add(msg_size) > config.max_upward_queue_size
-        {
+        if para_queue_size.saturating_add(msg_size) > config.max_upward_queue_size {
             return Err(UmpAcceptanceCheckErr::TotalSizeExceeded {
                 total_size: para_queue_size.saturating_add(msg_size).into(),
                 limit: config.max_upward_queue_size.into(),
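
The hunks above make three mechanical cleanups: a function signature and two `if` conditions collapsed onto one line once they fit within the width limit, and `msg_size: msg_size` reduced to the struct field init shorthand. A small self-contained sketch of both (the error type is hypothetical, not the real `UmpAcceptanceCheckErr`):

    // Hypothetical error type mirroring the shape of the hunk above;
    // `msg_size` and `max_size` use field init shorthand because each
    // local variable shares its field's name.
    #[derive(Debug)]
    struct MessageSizeError {
        idx: u32,
        msg_size: u32,
        max_size: u32,
    }

    fn check_message(idx: usize, msg_size: u32, max_size: u32) -> Result<(), MessageSizeError> {
        // Short enough to fit on one line, so rustfmt keeps the condition unbroken.
        if msg_size > max_size {
            return Err(MessageSizeError { idx: idx as u32, msg_size, max_size })
        }
        Ok(())
    }

    fn main() {
        assert!(check_message(0, 10, 100).is_ok());
        assert!(check_message(1, 200, 100).is_err());
    }
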
5 changes: 3 additions & 2 deletions runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -16,7 +16,7 @@
 
 //! Put implementations of functions from staging APIs here.
 
-use crate::{configuration, dmp, hrmp, initializer, paras, shared, inclusion};
+use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared};
 use primitives::{
     vstaging::{
         AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints,
@@ -50,7 +50,8 @@ pub fn backing_state<T: initializer::Config>(
         Some(block_num).zip(<paras::Pallet<T>>::future_code_hash(para_id))
     });
 
-    let (ump_msg_count, ump_total_bytes) = <inclusion::Pallet<T>>::relay_dispatch_queue_size(para_id);
+    let (ump_msg_count, ump_total_bytes) =
+        <inclusion::Pallet<T>>::relay_dispatch_queue_size(para_id);
     let ump_remaining = config.max_upward_queue_count - ump_msg_count;
     let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes;
 
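
Both changes in this file are likewise standard rustfmt output: the names inside a `use` list are sorted alphabetically (`inclusion` moves ahead of `initializer`), and a `let` whose right-hand side overflows the line is broken after the `=`. An illustrative stand-alone sketch using standard-library paths rather than the runtime's modules:

    // rustfmt keeps the items of a `use` group in alphabetical order.
    use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};

    fn main() {
        let map: HashMap<u8, u32> = HashMap::new();
        // An overlong right-hand side breaks after the `=`, as in the hunk above.
        let (ump_msg_count, ump_total_bytes) =
            (map.len() as u32, map.values().copied().sum::<u32>());
        println!("count = {ump_msg_count}, bytes = {ump_total_bytes}");
        let _ = (BTreeMap::<u8, u8>::new(), BTreeSet::<u8>::new(), HashSet::<u8>::new());
    }
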
