Ozgun upgrade polkadot sdk v1.13.0 #293

Closed
wants to merge 10 commits into from
3,690 changes: 2,013 additions & 1,677 deletions evm-template/Cargo.lock

Large diffs are not rendered by default.

246 changes: 123 additions & 123 deletions evm-template/Cargo.toml

Large diffs are not rendered by default.

15 changes: 7 additions & 8 deletions evm-template/node/Cargo.toml
@@ -14,10 +14,10 @@ clap = { workspace = true }
futures = { workspace = true }
hex = { workspace = true }
hex-literal = { workspace = true }
jsonrpsee = { workspace = true, features = [ "server" ] }
jsonrpsee = { workspace = true, features = ["server"] }
log = { workspace = true }
parity-scale-codec = { workspace = true }
serde = { workspace = true, features = [ "derive" ] }
serde = { workspace = true, features = ["derive"] }
serde_derive = { workspace = true }
serde_json = { workspace = true }

@@ -59,7 +59,7 @@ substrate-frame-rpc-system = { workspace = true }
substrate-prometheus-endpoint = { workspace = true }

# Polkadot
polkadot-cli = { workspace = true, features = [ "rococo-native" ] }
polkadot-cli = { workspace = true, features = ["rococo-native"] }
polkadot-primitives = { workspace = true }
xcm = { workspace = true }

@@ -78,12 +78,12 @@ cumulus-relay-chain-interface = { workspace = true }
# Frontier
fc-api = { workspace = true }
fc-consensus = { workspace = true }
fc-db = { workspace = true, features = [ "rocksdb" ] }
fc-mapping-sync = { workspace = true, features = [ "sql" ] }
fc-db = { workspace = true, features = ["rocksdb"] }
fc-mapping-sync = { workspace = true, features = ["sql"] }
fc-rpc = { workspace = true }
fc-rpc-core = { workspace = true }
fc-storage = { workspace = true }
fp-dynamic-fee = { workspace = true, features = [ "std" ] }
fp-dynamic-fee = { workspace = true, features = ["std"] }
fp-evm = { workspace = true }
fp-rpc = { workspace = true }

@@ -93,7 +93,6 @@ substrate-build-script-utils = { workspace = true }
[features]
default = []
async-backing = []
experimental = []
runtime-benchmarks = [
"evm-runtime-template/runtime-benchmarks",
"frame-benchmarking-cli/runtime-benchmarks",
@@ -108,7 +107,7 @@ try-runtime = [
"polkadot-cli/try-runtime",
"sp-runtime/try-runtime",
]
txpool = [ "fc-rpc/txpool" ]
txpool = ["fc-rpc/txpool"]

[lints]
workspace = true
3 changes: 1 addition & 2 deletions evm-template/node/src/chain_spec.rs
@@ -16,8 +16,7 @@ use sp_runtime::traits::{IdentifyAccount, Verify};
use crate::contracts::{parse_contracts, ContractsPath};

/// Specialized `ChainSpec` for the normal parachain runtime.
pub type ChainSpec =
sc_service::GenericChainSpec<evm_runtime_template::RuntimeGenesisConfig, Extensions>;
pub type ChainSpec = sc_service::GenericChainSpec<Extensions>;

/// The default XCM version to set in genesis config.
const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION;
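Note: the `GenericChainSpec` change mirrors the upstream move away from a typed `RuntimeGenesisConfig` generic toward JSON genesis patches built with the chain-spec builder. A minimal sketch of what a spec constructor can look like under that API — the `Extensions { relay_chain, para_id }` layout, `WASM_BINARY`, and the patch keys are assumptions for illustration, not part of this diff:

```rust
use sc_service::ChainType;

// Hypothetical helper inside node/src/chain_spec.rs.
pub fn development_chain_spec() -> ChainSpec {
    ChainSpec::builder(
        evm_runtime_template::WASM_BINARY.expect("runtime wasm must be built"),
        // Assumed Extensions layout for a parachain template.
        Extensions { relay_chain: "rococo-local".into(), para_id: 1000 },
    )
    .with_name("Development")
    .with_id("dev")
    .with_chain_type(ChainType::Development)
    // Genesis is expressed as a JSON patch over the runtime defaults instead of
    // a typed RuntimeGenesisConfig value.
    .with_genesis_config_patch(serde_json::json!({
        "polkadotXcm": { "safeXcmVersion": SAFE_XCM_VERSION }
    }))
    .build()
}
```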
2 changes: 1 addition & 1 deletion evm-template/node/src/command.rs
@@ -190,7 +190,7 @@ pub fn run() -> Result<()> {
match cmd {
BenchmarkCmd::Pallet(cmd) =>
if cfg!(feature = "runtime-benchmarks") {
runner.sync_run(|config| cmd.run::<sp_runtime::traits::HashingFor<Block>, ReclaimHostFunctions>(config))
runner.sync_run(|config| cmd.run_with_spec::<sp_runtime::traits::HashingFor<Block>, ReclaimHostFunctions>(Some(config.chain_spec)))
} else {
Err("Benchmarking wasn't enabled when building the node. \
You can enable it with `--features runtime-benchmarks`."
38 changes: 22 additions & 16 deletions evm-template/node/src/eth.rs
@@ -7,12 +7,13 @@ use std::{

// Local
use evm_runtime_template::opaque::Block;
use fc_rpc::{EthTask, OverrideHandle};
use fc_rpc::EthTask;
pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool};
pub use fc_storage::{StorageOverride, StorageOverrideHandler};
use futures::{future, prelude::*};
// Substrate
use sc_client_api::BlockchainEvents;
use sc_executor::WasmExecutor;
use sc_executor::{HostFunctions, WasmExecutor};
use sc_network_sync::SyncingService;
use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
use sp_api::ConstructRuntimeApi;
@@ -24,7 +25,7 @@ pub type FullClient<RuntimeApi, Executor> =
sc_service::TFullClient<Block, RuntimeApi, WasmExecutor<Executor>>;

/// Frontier DB backend type.
pub type FrontierBackend = fc_db::Backend<Block>;
pub type FrontierBackend<C> = fc_db::Backend<Block, C>;

pub fn db_config_dir(config: &Configuration) -> PathBuf {
config.base_path.config_dir(config.chain_spec.id())
@@ -124,13 +125,13 @@ impl<Api> EthCompatRuntimeApiCollection for Api where
{
}

pub async fn spawn_frontier_tasks<RuntimeApi, Functions>(
pub async fn spawn_frontier_tasks<RuntimeApi, Executor>(
task_manager: &TaskManager,
client: Arc<FullClient<RuntimeApi, Functions>>,
client: Arc<FullClient<RuntimeApi, Executor>>,
backend: Arc<FullBackend>,
frontier_backend: FrontierBackend,
frontier_backend: Arc<FrontierBackend<FullClient<RuntimeApi, Executor>>>,
filter_pool: Option<FilterPool>,
overrides: Arc<OverrideHandle<Block>>,
storage_override: Arc<dyn StorageOverride<Block>>,
fee_history_cache: FeeHistoryCache,
fee_history_cache_limit: FeeHistoryCacheLimit,
sync: Arc<SyncingService<Block>>,
@@ -140,13 +141,13 @@ pub async fn spawn_frontier_tasks<RuntimeApi, Functions>(
>,
>,
) where
RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Functions>>,
RuntimeApi: ConstructRuntimeApi<Block, FullClient<RuntimeApi, Executor>>,
RuntimeApi: Send + Sync + 'static,
RuntimeApi::RuntimeApi: EthCompatRuntimeApiCollection,
Functions: sc_executor::sp_wasm_interface::HostFunctions,
Executor: HostFunctions + 'static,
{
// Spawn main mapping sync worker background task.
match frontier_backend {
match &*frontier_backend {
fc_db::Backend::KeyValue(b) => {
task_manager.spawn_essential_handle().spawn(
"frontier-mapping-sync-worker",
@@ -156,10 +157,10 @@ pub async fn spawn_frontier_tasks<RuntimeApi, Functions>(
Duration::new(6, 0),
client.clone(),
backend,
overrides.clone(),
Arc::new(b),
storage_override.clone(),
b.clone(),
3,
0,
0u32,
fc_mapping_sync::SyncStrategy::Normal,
sync,
pubsub_notification_sinks,
@@ -174,10 +175,10 @@ pub async fn spawn_frontier_tasks<RuntimeApi, Functions>(
fc_mapping_sync::sql::SyncWorker::run(
client.clone(),
backend,
Arc::new(b),
b.clone(),
client.import_notification_stream(),
fc_mapping_sync::sql::SyncWorkerConfig {
read_notification_timeout: Duration::from_secs(10),
read_notification_timeout: Duration::from_secs(30),
check_indexed_blocks_interval: Duration::from_secs(60),
},
fc_mapping_sync::SyncStrategy::Parachain,
@@ -203,6 +204,11 @@ pub async fn spawn_frontier_tasks<RuntimeApi, Functions>(
task_manager.spawn_essential_handle().spawn(
"frontier-fee-history",
Some("frontier"),
EthTask::fee_history_task(client, overrides, fee_history_cache, fee_history_cache_limit),
EthTask::fee_history_task(
client,
storage_override,
fee_history_cache,
fee_history_cache_limit,
),
);
}
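For orientation, a hedged sketch of the matching call site (variable names assumed from `start_node_impl` in service.rs): the backend now travels as an `Arc<FrontierBackend<_>>` and a single `StorageOverride` trait object replaces the old `OverrideHandle`.

```rust
// Sketch only — argument order mirrors the updated signature above.
spawn_frontier_tasks(
    &task_manager,
    client.clone(),
    backend.clone(),
    frontier_backend.clone(),  // Arc<FrontierBackend<ParachainClient>>
    filter_pool.clone(),
    storage_override.clone(),  // Arc<dyn StorageOverride<Block>>
    fee_history_cache.clone(),
    fee_history_cache_limit,
    sync_service.clone(),
    pubsub_notification_sinks.clone(),
)
.await;
```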
10 changes: 5 additions & 5 deletions evm-template/node/src/rpc/eth.rs
@@ -1,9 +1,9 @@
use std::{collections::BTreeMap, sync::Arc};

// Frontier
pub use fc_rpc::{EthBlockDataCacheTask, EthConfig, OverrideHandle};
pub use fc_rpc::{EthBlockDataCacheTask, EthConfig};
pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool};
pub use fc_storage::overrides_handle;
pub use fc_storage::StorageOverride;
use fp_rpc::{ConvertTransaction, ConvertTransactionRuntimeApi, EthereumRuntimeRPCApi};
use jsonrpsee::RpcModule;
// Substrate
@@ -12,7 +12,7 @@ use sc_client_api::{
client::BlockchainEvents,
AuxStore, UsageProvider,
};
use sc_network::NetworkService;
use sc_network::service::traits::NetworkService;
use sc_network_sync::SyncingService;
use sc_rpc::SubscriptionTaskExecutor;
use sc_transaction_pool::{ChainApi, Pool};
@@ -40,13 +40,13 @@ pub struct EthDeps<B: BlockT, C, P, A: ChainApi, CT, CIDP> {
/// Whether to enable dev signer
pub enable_dev_signer: bool,
/// Network service
pub network: Arc<NetworkService<B, B::Hash>>,
pub network: Arc<dyn NetworkService>,
/// Chain syncing service
pub sync: Arc<SyncingService<B>>,
/// Frontier Backend.
pub frontier_backend: Arc<dyn fc_api::Backend<B>>,
/// Ethereum data access overrides.
pub overrides: Arc<OverrideHandle<B>>,
pub overrides: Arc<dyn StorageOverride<B>>,
/// Cache for Ethereum block data.
pub block_data_cache: Arc<EthBlockDataCacheTask<B>>,
/// EthFilterApi pool.
2 changes: 1 addition & 1 deletion evm-template/node/src/rpc/mod.rs
@@ -20,7 +20,7 @@ use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_inherents::CreateInherentDataProviders;
use sp_runtime::traits::Block as BlockT;

pub use self::eth::{overrides_handle, EthDeps};
pub use self::eth::EthDeps;
use crate::rpc::eth::create_eth;

/// A type representing all RPC extensions.
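With the `overrides_handle` re-export gone, the service constructs the override directly and shares it as a trait object — a one-line hedged sketch, assuming the client satisfies `StorageOverrideHandler`'s bounds (this is the same pattern the service.rs diff below uses):

```rust
let storage_override: Arc<dyn fc_storage::StorageOverride<Block>> =
    Arc::new(fc_storage::StorageOverrideHandler::new(client.clone()));
```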
56 changes: 28 additions & 28 deletions evm-template/node/src/service.rs
@@ -28,26 +28,26 @@ use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
use sc_client_api::Backend;
use sc_consensus::ImportQueue;
use sc_executor::WasmExecutor;
use sc_network::NetworkBlock;
use sc_network::{config::FullNetworkConfiguration, NetworkBlock};
use sc_network_sync::SyncingService;
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_core::U256;
use sp_core::{H256, U256};
use sp_keystore::KeystorePtr;
use substrate_prometheus_endpoint::Registry;

use crate::eth::{
db_config_dir, new_frontier_partial, spawn_frontier_tasks, BackendType, EthConfiguration,
FrontierBackend, FrontierPartialComponents,
FrontierBackend, FrontierPartialComponents, StorageOverrideHandler,
};

#[cfg(not(feature = "runtime-benchmarks"))]
type HostFunctions =
pub type HostFunctions =
(sp_io::SubstrateHostFunctions, cumulus_client_service::storage_proof_size::HostFunctions);

#[cfg(feature = "runtime-benchmarks")]
type HostFunctions = (
pub type HostFunctions = (
sp_io::SubstrateHostFunctions,
cumulus_client_service::storage_proof_size::HostFunctions,
frame_benchmarking::benchmarking::HostFunctions,
@@ -72,8 +72,9 @@ pub type Service = PartialComponents<
ParachainBlockImport,
Option<Telemetry>,
Option<TelemetryWorkerHandle>,
FrontierBackend,
Arc<fc_rpc::OverrideHandle<Block>>,
// TODO: I used `ParachainExecutor` in here, but in upstream frontier, they are using an `Executor` generic which depends on `NativeExecutionDispatch`.
FrontierBackend<ParachainClient>,
Arc<dyn fc_storage::StorageOverride<Block>>,
),
>;

@@ -132,14 +133,14 @@ pub fn new_partial(
&task_manager,
)?;

let overrides = crate::rpc::overrides_handle(client.clone());
let storage_override = Arc::new(StorageOverrideHandler::new(client.clone()));

let frontier_backend = match eth_config.frontier_backend_type {
BackendType::KeyValue => FrontierBackend::KeyValue(fc_db::kv::Backend::open(
BackendType::KeyValue => FrontierBackend::KeyValue(Arc::new(fc_db::kv::Backend::open(
Arc::clone(&client),
&config.database,
&db_config_dir(config),
)?),
)?)),
BackendType::Sql => {
let db_path = db_config_dir(config).join("sql");
std::fs::create_dir_all(&db_path).expect("failed creating sql db directory");
@@ -156,10 +157,10 @@
}),
eth_config.frontier_sql_backend_pool_size,
std::num::NonZeroU32::new(eth_config.frontier_sql_backend_num_ops_timeout),
overrides.clone(),
storage_override.clone(),
))
.unwrap_or_else(|err| panic!("failed creating sql backend: {:?}", err));
FrontierBackend::Sql(backend)
FrontierBackend::Sql(Arc::new(backend))
}
};

@@ -171,7 +172,13 @@
task_manager,
transaction_pool,
select_chain: (),
other: (block_import, telemetry, telemetry_worker_handle, frontier_backend, overrides),
other: (
block_import,
telemetry,
telemetry_worker_handle,
frontier_backend,
storage_override,
),
})
}

@@ -198,7 +205,10 @@

let (block_import, mut telemetry, telemetry_worker_handle, frontier_backend, overrides) =
params.other;
let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);

let frontier_backend = Arc::new(frontier_backend);
let net_config: FullNetworkConfiguration<Block, H256, sc_network::NetworkWorker<_, _>> =
sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);

let client = params.client.clone();
let backend = params.backend.clone();
@@ -248,7 +258,7 @@
transaction_pool: Some(OffchainTransactionPoolFactory::new(
transaction_pool.clone(),
)),
network_provider: network.clone(),
network_provider: Arc::new(network.clone()),
is_validator: parachain_config.role.is_authority(),
enable_http_requests: false,
custom_extensions: move |_| vec![],
@@ -306,9 +316,9 @@
enable_dev_signer,
network: network.clone(),
sync: sync_service.clone(),
frontier_backend: match frontier_backend.clone() {
fc_db::Backend::KeyValue(b) => Arc::new(b),
fc_db::Backend::Sql(b) => Arc::new(b),
frontier_backend: match &*frontier_backend.clone() {
fc_db::Backend::KeyValue(b) => b.clone(),
fc_db::Backend::Sql(b) => b.clone(),
},
overrides: overrides.clone(),
block_data_cache: block_data_cache.clone(),
@@ -454,8 +464,6 @@ fn build_import_queue(
telemetry: Option<TelemetryHandle>,
task_manager: &TaskManager,
) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error> {
let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;

Ok(cumulus_client_consensus_aura::equivocation_import_queue::fully_verifying_import_queue::<
sp_consensus_aura::sr25519::AuthorityPair,
_,
@@ -469,7 +477,6 @@
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
Ok(timestamp)
},
slot_duration,
&task_manager.spawn_essential_handle(),
config.prometheus_registry(),
telemetry,
@@ -498,11 +505,6 @@ fn start_consensus(
#[cfg(feature = "async-backing")]
use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params};

// NOTE: because we use Aura here explicitly, we can use
// `CollatorSybilResistance::Resistant` when starting the network.
#[cfg(not(feature = "async-backing"))]
let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;

let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
task_manager.spawn_handle(),
client.clone(),
@@ -539,8 +541,6 @@
collator_key,
para_id,
overseer_handle,
#[cfg(not(feature = "async-backing"))]
slot_duration,
relay_chain_slot_duration,
proposer,
collator_service,
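The removed `slot_duration` plumbing tracks the upstream API, where the Aura import queue and the collator now obtain the slot duration themselves, so nothing replaces it at the call sites. If another component still needs the value, it can be fetched exactly as the deleted lines did:

```rust
// Optional — only if some other component still wants the slot length.
let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
```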