Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix Shiden genesis sync #1242

Merged
merged 5 commits into from
May 16, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 2 additions & 16 deletions bin/collator/src/local/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -368,15 +368,10 @@ pub fn start_node(
enable_evm_rpc: true, // enable EVM RPC for dev node by default
};

let pending_consensus_data_provider = Box::new(
fc_rpc::pending::AuraConsensusDataProvider::new(client.clone()),
);

crate::rpc::create_full(
deps,
subscription,
pubsub_notification_sinks.clone(),
pending_consensus_data_provider,
rpc_config.clone(),
)
.map_err::<ServiceError, _>(Into::into)
Expand Down Expand Up @@ -656,17 +651,8 @@ pub fn start_node(config: Configuration) -> Result<TaskManager, ServiceError> {
enable_evm_rpc: true, // enable EVM RPC for dev node by default
};

let pending_consensus_data_provider = Box::new(
fc_rpc::pending::AuraConsensusDataProvider::new(client.clone()),
);

crate::rpc::create_full(
deps,
subscription,
pubsub_notification_sinks.clone(),
pending_consensus_data_provider,
)
.map_err::<ServiceError, _>(Into::into)
crate::rpc::create_full(deps, subscription, pubsub_notification_sinks.clone())
.map_err::<ServiceError, _>(Into::into)
})
};

Expand Down
18 changes: 2 additions & 16 deletions bin/collator/src/parachain/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -494,17 +494,8 @@ where
enable_evm_rpc: additional_config.enable_evm_rpc,
};

let pending_consensus_data_provider = Box::new(
fc_rpc::pending::AuraConsensusDataProvider::new(client.clone()),
);

crate::rpc::create_full(
deps,
subscription,
pubsub_notification_sinks.clone(),
pending_consensus_data_provider,
)
.map_err(Into::into)
crate::rpc::create_full(deps, subscription, pubsub_notification_sinks.clone())
.map_err(Into::into)
})
};

Expand Down Expand Up @@ -845,15 +836,10 @@ where
enable_evm_rpc: additional_config.enable_evm_rpc,
};

let pending_consensus_data_provider = Box::new(
fc_rpc::pending::AuraConsensusDataProvider::new(client.clone()),
);

crate::rpc::create_full(
deps,
subscription,
pubsub_notification_sinks.clone(),
pending_consensus_data_provider,
rpc_config.clone(),
)
.map_err(Into::into)
Expand Down
202 changes: 149 additions & 53 deletions bin/collator/src/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@

//! Astar RPCs implementation.

use cumulus_primitives_parachain_inherent::ParachainInherentData;
use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
use fc_rpc::{
pending::ConsensusDataProvider, Eth, EthApiServer, EthBlockDataCacheTask, EthFilter,
Expand All @@ -38,14 +37,23 @@ use sc_rpc::dev::DevApiServer;
pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor};
use sc_transaction_pool::{ChainApi, Pool};
use sc_transaction_pool_api::TransactionPool;
use sp_api::{CallApiAt, ProvideRuntimeApi};
use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi};
use sp_block_builder::BlockBuilder;
use sp_blockchain::{
Backend as BlockchainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata,
};
use sp_consensus_aura::{sr25519::AuthorityId as AuraId, AuraApi};
use sp_runtime::traits::BlakeTwo256;
use std::sync::Arc;
use sp_consensus_aura::{
digests::CompatibleDigestItem,
sr25519::{AuthorityId as AuraId, AuthoritySignature},
AuraApi,
};
use sp_inherents::{CreateInherentDataProviders, Error, InherentData};
use sp_runtime::{
traits::{BlakeTwo256, Block as BlockT, Header},
Digest, DigestItem,
};
use sp_timestamp::TimestampInherentData;
use std::{marker::PhantomData, sync::Arc};
use substrate_frame_rpc_system::{System, SystemApiServer};

#[cfg(feature = "evm-tracing")]
Expand Down Expand Up @@ -149,7 +157,6 @@ pub fn create_full<C, P, BE, A>(
fc_mapping_sync::EthereumBlockNotification<Block>,
>,
>,
pending_consenus_data_provider: Box<dyn ConsensusDataProvider<Block>>,
tracing_config: EvmTracingConfig,
) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
where
Expand Down Expand Up @@ -182,12 +189,7 @@ where
let client = Arc::clone(&deps.client);
let graph = Arc::clone(&deps.graph);

let mut io = create_full_rpc(
deps,
subscription_task_executor,
pubsub_notification_sinks,
pending_consenus_data_provider,
)?;
let mut io = create_full_rpc(deps, subscription_task_executor, pubsub_notification_sinks)?;

if tracing_config.enable_txpool {
io.merge(MoonbeamTxPool::new(Arc::clone(&client), graph).into_rpc())?;
Expand Down Expand Up @@ -221,7 +223,6 @@ pub fn create_full<C, P, BE, A>(
fc_mapping_sync::EthereumBlockNotification<Block>,
>,
>,
pending_consenus_data_provider: Box<dyn ConsensusDataProvider<Block>>,
) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
where
C: ProvideRuntimeApi<Block>
Expand All @@ -248,12 +249,7 @@ where
BE::Blockchain: BlockchainBackend<Block>,
A: ChainApi<Block = Block> + 'static,
{
create_full_rpc(
deps,
subscription_task_executor,
pubsub_notification_sinks,
pending_consenus_data_provider,
)
create_full_rpc(deps, subscription_task_executor, pubsub_notification_sinks)
}

fn create_full_rpc<C, P, BE, A>(
Expand All @@ -264,7 +260,6 @@ fn create_full_rpc<C, P, BE, A>(
fc_mapping_sync::EthereumBlockNotification<Block>,
>,
>,
pending_consenus_data_provider: Box<dyn ConsensusDataProvider<Block>>,
) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
where
C: ProvideRuntimeApi<Block>
Expand Down Expand Up @@ -319,37 +314,6 @@ where

let no_tx_converter: Option<fp_rpc::NoTransactionConverter> = None;

let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
let pending_create_inherent_data_providers = move |_, _| async move {
let current = sp_timestamp::InherentDataProvider::from_system_time();
let next_slot = current.timestamp().as_millis() + slot_duration.as_millis();
let timestamp = sp_timestamp::InherentDataProvider::new(next_slot.into());
let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);
// Create a dummy parachain inherent data provider which is required to pass
// the checks by the para chain system. We use dummy values because in the 'pending context'
// neither do we have access to the real values nor do we need them.
let (relay_parent_storage_root, relay_chain_state) =
RelayStateSproofBuilder::default().into_state_root_and_proof();
let vfp = PersistedValidationData {
// This is a hack to make `cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases`
// happy. Relay parent number can't be bigger than u32::MAX.
relay_parent_number: u32::MAX,
relay_parent_storage_root,
..Default::default()
};
let parachain_inherent_data = ParachainInherentData {
validation_data: vfp,
relay_chain_state,
downward_messages: Default::default(),
horizontal_messages: Default::default(),
};
Ok((slot, timestamp, parachain_inherent_data))
};

io.merge(
Eth::<_, _, _, _, _, _, _, ()>::new(
client.clone(),
Expand All @@ -367,8 +331,10 @@ where
// Allow 10x max allowed weight for non-transactional calls
10,
None,
pending_create_inherent_data_providers,
Some(pending_consenus_data_provider),
PendingCrateInherentDataProvider::new(client.clone()),
Some(Box::new(AuraConsensusDataProviderFallback::new(
client.clone(),
))),
)
.replace_config::<AstarEthConfig<C, BE>>()
.into_rpc(),
Expand Down Expand Up @@ -407,3 +373,133 @@ where

Ok(io)
}

struct AuraConsensusDataProviderFallback<B, C> {
ermalkaleci marked this conversation as resolved.
Show resolved Hide resolved
client: Arc<C>,
phantom_data: PhantomData<B>,
}

impl<B, C> AuraConsensusDataProviderFallback<B, C>
where
B: BlockT,
C: AuxStore + ProvideRuntimeApi<B> + UsageProvider<B> + Send + Sync,
C::Api: AuraApi<B, AuraId>,
{
fn new(client: Arc<C>) -> Self {
Self {
client,
phantom_data: Default::default(),
}
}
}

impl<B, C> ConsensusDataProvider<B> for AuraConsensusDataProviderFallback<B, C>
where
    B: BlockT,
    C: AuxStore + ProvideRuntimeApi<B> + UsageProvider<B> + Send + Sync,
    C::Api: AuraApi<B, AuraId>,
{
    /// Builds an Aura pre-digest (slot claim) for a "pending" block on top of
    /// `parent`, deriving the slot from the timestamp inherent and the chain's
    /// slot duration.
    ///
    /// Returns an error when the parent's runtime does not expose `AuraApi`,
    /// in which case no Aura digest can be produced.
    fn create_digest(&self, parent: &B::Header, data: &InherentData) -> Result<Digest, Error> {
        // Check against the generic block type `B` — not the concrete `Block`
        // alias — so the API query matches the `C::Api: AuraApi<B, AuraId>`
        // bound this impl is written against.
        if self
            .client
            .runtime_api()
            .has_api::<dyn AuraApi<B, AuraId>>(parent.hash())
            .unwrap_or_default()
        {
            let slot_duration = sc_consensus_aura::slot_duration(&*self.client)
                .expect("slot_duration should be present at this point; qed.");
            let timestamp = data
                .timestamp_inherent_data()?
                .expect("Timestamp is always present; qed");

            // The pre-digest carries the slot derived from the (possibly
            // advanced) timestamp inherent, just like a real Aura author would.
            let digest_item =
                <DigestItem as CompatibleDigestItem<AuthoritySignature>>::aura_pre_digest(
                    sp_consensus_aura::Slot::from_timestamp(timestamp, slot_duration),
                );

            return Ok(Digest {
                logs: vec![digest_item],
            });
        }
        Err(Error::Application("AuraApi is not present".into()))
    }
}

/// Inherent-data-provider factory used when servicing RPC requests against the
/// "pending" block (constructed on the fly rather than imported).
///
/// NOTE(review): the name looks like a typo for
/// `PendingCreateInherentDataProvider` (it implements
/// `CreateInherentDataProviders`); renaming would also touch the call site in
/// `create_full_rpc`, so it is left as-is here.
struct PendingCrateInherentDataProvider<B, C> {
    // Client handle used to query the runtime (e.g. for `AuraApi` support).
    client: Arc<C>,
    // Marks the otherwise-unused block type parameter `B`.
    phantom_data: PhantomData<B>,
}

impl<B, C> PendingCrateInherentDataProvider<B, C>
where
B: BlockT,
C: AuxStore + ProvideRuntimeApi<B> + UsageProvider<B> + Send + Sync,
C::Api: AuraApi<B, AuraId>,
{
fn new(client: Arc<C>) -> Self {
Self {
client,
phantom_data: Default::default(),
}
}
}

#[async_trait::async_trait]
impl<B, C> CreateInherentDataProviders<B, ()> for PendingCrateInherentDataProvider<B, C>
where
    B: BlockT,
    C: AuxStore + ProvideRuntimeApi<B> + UsageProvider<B> + Send + Sync,
    C::Api: AuraApi<B, AuraId>,
{
    type InherentDataProviders = (
        sp_consensus_aura::inherents::InherentDataProvider,
        sp_timestamp::InherentDataProvider,
        cumulus_primitives_parachain_inherent::ParachainInherentData,
    );

    /// Produces the (slot, timestamp, parachain) inherent data providers
    /// required to build a "pending" block on top of `parent`.
    ///
    /// # Errors
    /// Fails when the parent's runtime does not expose `AuraApi`, or when the
    /// slot duration cannot be read from the runtime.
    async fn create_inherent_data_providers(
        &self,
        parent: B::Hash,
        _extra_args: (),
    ) -> Result<Self::InherentDataProviders, Box<dyn std::error::Error + Send + Sync>> {
        // Check against the generic block type `B` — not the concrete `Block`
        // alias — so the API query matches the `C::Api: AuraApi<B, AuraId>`
        // bound this impl is written against.
        if !self
            .client
            .runtime_api()
            .has_api::<dyn AuraApi<B, AuraId>>(parent)
            .unwrap_or_default()
        {
            return Err("AuraApi is not present".into());
        }

        // Propagate the error instead of panicking: this runs on the RPC path,
        // where a failed runtime call should surface as an RPC error.
        let slot_duration = sc_consensus_aura::slot_duration(&*self.client)?;

        // Advance the timestamp by one slot so the pending block claims the
        // slot *after* the current one.
        let current = sp_timestamp::InherentDataProvider::from_system_time();
        let next_slot = current.timestamp().as_millis() + slot_duration.as_millis();
        let timestamp = sp_timestamp::InherentDataProvider::new(next_slot.into());
        let slot =
            sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                *timestamp,
                slot_duration,
            );

        // Create a dummy parachain inherent data provider which is required to pass
        // the checks by the para chain system. We use dummy values because in the 'pending context'
        // neither do we have access to the real values nor do we need them.
        let (relay_parent_storage_root, relay_chain_state) =
            RelayStateSproofBuilder::default().into_state_root_and_proof();
        let vfp = PersistedValidationData {
            // This is a hack to make `cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases`
            // happy. Relay parent number can't be bigger than u32::MAX.
            relay_parent_number: u32::MAX,
            relay_parent_storage_root,
            ..Default::default()
        };
        let parachain_inherent_data =
            cumulus_primitives_parachain_inherent::ParachainInherentData {
                validation_data: vfp,
                relay_chain_state,
                downward_messages: Default::default(),
                horizontal_messages: Default::default(),
            };
        Ok((slot, timestamp, parachain_inherent_data))
    }
}
Loading