
Commit

chore: Run clippy pedantic
petarvujovic98 committed May 9, 2024
1 parent 3f1d95a commit 2e79158
Showing 8 changed files with 76 additions and 74 deletions.
8 changes: 4 additions & 4 deletions host/src/lib.rs
@@ -25,13 +25,13 @@ use std::{alloc, collections::HashMap, fmt::Debug, path::PathBuf};

use alloy_primitives::Address;
use alloy_rpc_types::EIP1186AccountProofResponse;
use anyhow::{Context, Result};
use anyhow::Context;
use cap::Cap;
use clap::Parser;
use serde::{Deserialize, Serialize};
use serde_json::Value;

use crate::{error::HostError, request::ProofRequestOpt};
use crate::{error::HostResult, request::ProofRequestOpt};

type MerkleProof = HashMap<Address, EIP1186AccountProofResponse>;

@@ -104,7 +104,7 @@ pub struct Cli {

impl Cli {
/// Read the options from a file and merge it with the current options.
pub fn merge_from_file(&mut self) -> Result<(), HostError> {
pub fn merge_from_file(&mut self) -> HostResult<()> {
let file = std::fs::File::open(&self.config_path)?;
let reader = std::io::BufReader::new(file);
let mut config: Value = serde_json::from_reader(reader)?;
@@ -135,7 +135,7 @@ pub struct ProverState {
}

impl ProverState {
pub fn init() -> Result<Self, HostError> {
pub fn init() -> HostResult<Self> {
// Read the command line arguments;
let mut opts = Cli::parse();
// Read the config file.
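The recurring change in this file is collapsing `Result<T, HostError>` into the `HostResult<T>` alias imported from `crate::error`. The alias definition is not part of this diff; a minimal sketch of the assumed pattern, with a simplified stand-in for `HostError`:

```rust
/// Stand-in for the crate's error type; the real `HostError` lives in `host/src/error.rs`
/// and has more variants (RPC, conversion, serde, ...).
#[derive(Debug)]
pub struct HostError(pub String);

impl From<std::io::Error> for HostError {
    fn from(e: std::io::Error) -> Self {
        HostError(format!("io error: {e}"))
    }
}

/// Assumed shape of the alias: it keeps signatures short,
/// `HostResult<()>` instead of `Result<(), HostError>`.
pub type HostResult<T> = Result<T, HostError>;

/// `?` still works on io errors because `From<std::io::Error>` is implemented above.
pub fn read_config(path: &str) -> HostResult<String> {
    Ok(std::fs::read_to_string(path)?)
}

fn main() {
    match read_config("config.json") {
        Ok(cfg) => println!("{} bytes of config", cfg.len()),
        Err(HostError(msg)) => eprintln!("{msg}"),
    }
}
```

Call sites keep using `?` exactly as before, since the alias is just a `Result` with the error type pinned down.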
2 changes: 1 addition & 1 deletion host/src/metrics.rs
@@ -135,7 +135,7 @@ pub fn inc_guest_error(guest: &ProofType, block_id: u64) {
/// let duration = Duration::from_nanos(1_234_567_891);
/// assert_eq!(duration_to_f64(duration), 1.235);
/// ```
pub fn duration_to_f64(d: Duration) -> f64 {
fn duration_to_f64(d: Duration) -> f64 {
(d.as_secs_f64() * 1_000.0).round() / 1_000.0
}

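The only change in this file is dropping `pub` from `duration_to_f64`, which is used only inside the metrics module. A standalone reproduction of the helper and the doc-comment assertion (seconds rounded to millisecond precision):

```rust
use std::time::Duration;

/// Round a `Duration` to whole milliseconds, expressed in seconds.
fn duration_to_f64(d: Duration) -> f64 {
    (d.as_secs_f64() * 1_000.0).round() / 1_000.0
}

fn main() {
    let duration = Duration::from_nanos(1_234_567_891);
    // 1.234567891 s rounds to 1.235 s, as in the doc example above.
    assert_eq!(duration_to_f64(duration), 1.235);
    println!("{}", duration_to_f64(duration));
}
```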
6 changes: 3 additions & 3 deletions host/src/preflight.rs
@@ -131,15 +131,15 @@ pub async fn preflight<BDP: BlockDataProvider>(
)?,
blob_gas_used: block.header.blob_gas_used.map_or_else(
|| Ok(None),
|b: u128| -> Result<Option<u64>, HostError> {
|b: u128| -> HostResult<Option<u64>> {
b.try_into().map(Some).map_err(|_| {
HostError::Conversion("Failed converting blob gas used to u64".to_owned())
})
},
)?,
excess_blob_gas: block.header.excess_blob_gas.map_or_else(
|| Ok(None),
|b: u128| -> Result<Option<u64>, HostError> {
|b: u128| -> HostResult<Option<u64>> {
b.try_into().map(Some).map_err(|_| {
HostError::Conversion("Failed converting excess blob gas to u64".to_owned())
})
@@ -241,7 +241,7 @@ async fn prepare_taiko_chain_input(

// Decode the anchor tx to find out which L1 blocks we need to fetch
let anchor_tx = match &block.transactions {
BlockTransactions::Full(txs) => txs[0].to_owned(),
BlockTransactions::Full(txs) => txs[0].clone(),
_ => unreachable!(),
};
let anchor_call = decode_anchor(anchor_tx.input.as_ref())?;
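Two pedantic fixes in this file: the conversion closures now return the `HostResult` alias, and `txs[0].to_owned()` becomes `txs[0].clone()` (likely clippy's `implicit_clone` lint, since `to_owned` on an already-owned value is just a clone). A self-contained sketch of the fallible `Option<u128>` to `Option<u64>` conversion used for the blob-gas fields, with a simplified stand-in for `HostError::Conversion`:

```rust
/// Hypothetical error type mirroring `HostError::Conversion` from the diff.
#[derive(Debug)]
enum HostError {
    Conversion(String),
}

type HostResult<T> = Result<T, HostError>;

/// Convert an optional u128 gas value into an optional u64, failing loudly on
/// overflow instead of silently truncating.
fn optional_gas_to_u64(gas: Option<u128>, what: &str) -> HostResult<Option<u64>> {
    gas.map_or_else(
        || Ok(None),
        |b: u128| -> HostResult<Option<u64>> {
            b.try_into()
                .map(Some)
                .map_err(|_| HostError::Conversion(format!("Failed converting {what} to u64")))
        },
    )
}

fn main() {
    assert_eq!(optional_gas_to_u64(None, "blob gas used").unwrap(), None);
    assert_eq!(optional_gas_to_u64(Some(42), "blob gas used").unwrap(), Some(42));
    assert!(optional_gas_to_u64(Some(u128::MAX), "excess blob gas").is_err());
}
```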
19 changes: 10 additions & 9 deletions host/src/provider_db.rs
@@ -88,7 +88,7 @@ impl<BDP: BlockDataProvider> ProviderDb<BDP> {
Ok(provider_db)
}

pub async fn get_proofs(&mut self) -> Result<(MerkleProof, MerkleProof, usize), anyhow::Error> {
pub async fn get_proofs(&mut self) -> HostResult<(MerkleProof, MerkleProof, usize)> {
// Latest proof keys
let mut storage_keys = self.initial_db.storage_keys();
for (address, mut indices) in self.current_db.storage_keys() {
@@ -133,17 +133,18 @@ impl<BDP: BlockDataProvider> ProviderDb<BDP> {
Ok((initial_proofs, latest_proofs, num_storage_proofs))
}

pub async fn get_ancestor_headers(
&mut self,
) -> Result<Vec<AlloyConsensusHeader>, anyhow::Error> {
pub async fn get_ancestor_headers(&mut self) -> HostResult<Vec<AlloyConsensusHeader>> {
let earliest_block = self
.initial_db
.block_hashes
.keys()
.min()
.unwrap_or(&self.block_number);

let mut headers = Vec::with_capacity((self.block_number - *earliest_block) as usize);
let mut headers = Vec::with_capacity(
usize::try_from(self.block_number - *earliest_block)
.map_err(|_| HostError::Conversion("Could not convert u64 to usize".to_owned()))?,
);
for block_number in (*earliest_block..self.block_number).rev() {
if let std::collections::hash_map::Entry::Vacant(e) =
self.initial_headers.entry(block_number)
@@ -300,7 +301,7 @@ impl<BDP: BlockDataProvider> Database for ProviderDb<BDP> {

impl<BDP: BlockDataProvider> DatabaseCommit for ProviderDb<BDP> {
fn commit(&mut self, changes: HashMap<Address, Account>) {
self.current_db.commit(changes)
self.current_db.commit(changes);
}
}

@@ -315,7 +316,7 @@ impl<BDP: BlockDataProvider> OptimisticDatabase for ProviderDb<BDP> {

let Ok(accounts) = self
.provider
.get_accounts(&self.pending_accounts.iter().cloned().collect::<Vec<_>>())
.get_accounts(&self.pending_accounts.iter().copied().collect::<Vec<_>>())
.await
else {
return false;
@@ -330,7 +331,7 @@ impl<BDP: BlockDataProvider> OptimisticDatabase for ProviderDb<BDP> {

let Ok(slots) = self
.provider
.get_storage_values(&self.pending_slots.iter().cloned().collect::<Vec<_>>())
.get_storage_values(&self.pending_slots.iter().copied().collect::<Vec<_>>())
.await
else {
return false;
@@ -347,7 +348,7 @@ impl<BDP: BlockDataProvider> OptimisticDatabase for ProviderDb<BDP> {
&self
.pending_block_hashes
.iter()
.cloned()
.copied()
.map(|block_number| (block_number, false))
.collect::<Vec<_>>(),
)
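Besides switching to `HostResult`, this file replaces the `as usize` capacity cast with a checked `usize::try_from` (clippy's `cast_possible_truncation`) and swaps `.cloned()` for `.copied()` on iterators over `Copy` items. A minimal sketch of both, using a hypothetical `ancestor_capacity` helper:

```rust
#[derive(Debug)]
enum HostError {
    Conversion(String),
}

type HostResult<T> = Result<T, HostError>;

/// Pre-allocate one slot per ancestor header between `earliest_block` and
/// `block_number`, converting the u64 span to usize without a silently
/// truncating `as` cast.
fn ancestor_capacity(block_number: u64, earliest_block: u64) -> HostResult<usize> {
    usize::try_from(block_number - earliest_block)
        .map_err(|_| HostError::Conversion("Could not convert u64 to usize".to_owned()))
}

fn main() -> HostResult<()> {
    let headers: Vec<u64> = Vec::with_capacity(ancestor_capacity(100, 90)?);
    assert!(headers.capacity() >= 10);

    // `.copied()` is the pedantic spelling of `.cloned()` when the items are `Copy`.
    let pending: Vec<u64> = vec![1, 2, 3];
    let owned: Vec<u64> = pending.iter().copied().collect();
    assert_eq!(owned, pending);
    Ok(())
}
```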
74 changes: 37 additions & 37 deletions host/src/raiko.rs
@@ -50,7 +50,7 @@ impl Raiko {
pub async fn generate_input<BDP: BlockDataProvider>(
&self,
provider: BDP,
) -> Result<GuestInput, HostError> {
) -> HostResult<GuestInput> {
preflight(
provider,
self.request.block_number,
@@ -66,7 +66,7 @@ impl Raiko {
.map_err(Into::<error::HostError>::into)
}

pub fn get_output(&self, input: &GuestInput) -> Result<GuestOutput, HostError> {
pub fn get_output(&self, input: &GuestInput) -> HostResult<GuestOutput> {
match TaikoStrategy::build_from(input) {
Ok((header, _mpt_node)) => {
info!("Verifying final state using provider data ...");
@@ -79,48 +79,48 @@

// Check against the expected value of all fields for easy debugability
let exp = &input.block_header_reference;
check_eq(exp.parent_hash, header.parent_hash, "base_fee_per_gas");
check_eq(exp.ommers_hash, header.ommers_hash, "ommers_hash");
check_eq(exp.beneficiary, header.beneficiary, "beneficiary");
check_eq(exp.state_root, header.state_root, "state_root");
check_eq(&exp.parent_hash, &header.parent_hash, "base_fee_per_gas");
check_eq(&exp.ommers_hash, &header.ommers_hash, "ommers_hash");
check_eq(&exp.beneficiary, &header.beneficiary, "beneficiary");
check_eq(&exp.state_root, &header.state_root, "state_root");
check_eq(
exp.transactions_root,
header.transactions_root,
&exp.transactions_root,
&header.transactions_root,
"transactions_root",
);
check_eq(exp.receipts_root, header.receipts_root, "receipts_root");
check_eq(&exp.receipts_root, &header.receipts_root, "receipts_root");
check_eq(
exp.withdrawals_root,
header.withdrawals_root,
&exp.withdrawals_root,
&header.withdrawals_root,
"withdrawals_root",
);
check_eq(exp.logs_bloom, header.logs_bloom, "logs_bloom");
check_eq(exp.difficulty, header.difficulty, "difficulty");
check_eq(exp.number, header.number, "number");
check_eq(exp.gas_limit, header.gas_limit, "gas_limit");
check_eq(exp.gas_used, header.gas_used, "gas_used");
check_eq(exp.timestamp, header.timestamp, "timestamp");
check_eq(exp.mix_hash, header.mix_hash, "mix_hash");
check_eq(exp.nonce, header.nonce, "nonce");
check_eq(&exp.logs_bloom, &header.logs_bloom, "logs_bloom");
check_eq(&exp.difficulty, &header.difficulty, "difficulty");
check_eq(&exp.number, &header.number, "number");
check_eq(&exp.gas_limit, &header.gas_limit, "gas_limit");
check_eq(&exp.gas_used, &header.gas_used, "gas_used");
check_eq(&exp.timestamp, &header.timestamp, "timestamp");
check_eq(&exp.mix_hash, &header.mix_hash, "mix_hash");
check_eq(&exp.nonce, &header.nonce, "nonce");
check_eq(
exp.base_fee_per_gas,
header.base_fee_per_gas,
&exp.base_fee_per_gas,
&header.base_fee_per_gas,
"base_fee_per_gas",
);
check_eq(exp.blob_gas_used, header.blob_gas_used, "blob_gas_used");
check_eq(&exp.blob_gas_used, &header.blob_gas_used, "blob_gas_used");
check_eq(
exp.excess_blob_gas,
header.excess_blob_gas,
&exp.excess_blob_gas,
&header.excess_blob_gas,
"excess_blob_gas",
);
check_eq(
exp.parent_beacon_block_root,
header.parent_beacon_block_root,
&exp.parent_beacon_block_root,
&header.parent_beacon_block_root,
"parent_beacon_block_root",
);
check_eq(
exp.extra_data.clone(),
header.extra_data.clone(),
&exp.extra_data.clone(),
&header.extra_data.clone(),
"extra_data",
);

@@ -147,7 +147,7 @@ impl Raiko {
&self,
input: GuestInput,
output: &GuestOutput,
) -> Result<serde_json::Value, HostError> {
) -> HostResult<serde_json::Value> {
self.request
.proof_type
.run_prover(
@@ -173,13 +173,13 @@ impl Prover for NativeProver {
_request: &serde_json::Value,
) -> ProverResult<Proof> {
trace!("Running the native prover for input {input:?}");
match output.clone() {
GuestOutput::Success((wrapped_header, _)) => {
assemble_protocol_instance(&input, &wrapped_header.header)
.map_err(|e| ProverError::GuestError(e.to_string()))?;
}
_ => return Err(ProverError::GuestError("Unexpected output".to_owned())),
}

let GuestOutput::Success((wrapped_header, _)) = output.clone() else {
return Err(ProverError::GuestError("Unexpected output".to_owned()));
};

assemble_protocol_instance(&input, &wrapped_header.header)
.map_err(|e| ProverError::GuestError(e.to_string()))?;

to_proof(Ok(NativeResponse {
output: output.clone(),
Expand All @@ -191,7 +191,7 @@ impl Prover for NativeProver {
}
}

fn check_eq<T: std::cmp::PartialEq + std::fmt::Debug>(expected: T, actual: T, message: &str) {
fn check_eq<T: std::cmp::PartialEq + std::fmt::Debug>(expected: &T, actual: &T, message: &str) {
if expected != actual {
warn!("Assertion failed: {message} - Expected: {expected:?}, Found: {actual:?}");
}
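Two patterns dominate this file: `check_eq` now borrows its arguments (clippy's `needless_pass_by_value`), so callers pass `&exp.field` and `&header.field`, and the two-arm `match` on the prover output becomes a `let ... else` early return (clippy's `single_match_else`). A condensed sketch of both, with a made-up `Output` enum standing in for `GuestOutput`:

```rust
use std::fmt::Debug;

/// Borrowing version of the helper: comparing through `&T` avoids cloning
/// non-`Copy` fields such as `extra_data` just to compare them.
/// (The real code logs the mismatch with `tracing::warn!`.)
fn check_eq<T: PartialEq + Debug>(expected: &T, actual: &T, message: &str) {
    if expected != actual {
        eprintln!("Assertion failed: {message} - Expected: {expected:?}, Found: {actual:?}");
    }
}

/// Hypothetical stand-in for `GuestOutput` from the diff.
enum Output {
    Success(String),
    Failure,
}

fn handle(output: &Output) -> Result<String, String> {
    // `let ... else` replaces a two-arm `match` whose only job is to bail out early.
    let Output::Success(header) = output else {
        return Err("Unexpected output".to_owned());
    };
    Ok(format!("verified header {header}"))
}

fn main() {
    check_eq(&1u64, &1u64, "number");
    check_eq(&"a".to_owned(), &"b".to_owned(), "extra_data");
    assert!(handle(&Output::Success("0xabc".into())).is_ok());
    assert!(handle(&Output::Failure).is_err());
}
```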
27 changes: 14 additions & 13 deletions host/src/request.rs
@@ -212,24 +212,25 @@ pub struct ProverSpecificOpts {
pub risc0: Option<Value>,
}

impl From<ProverSpecificOpts> for HashMap<String, Value> {
impl<S: ::std::hash::BuildHasher + ::std::default::Default> From<ProverSpecificOpts>
for HashMap<String, Value, S>
{
fn from(value: ProverSpecificOpts) -> Self {
HashMap::from_iter(
[
("native", value.native.clone()),
("sgx", value.sgx.clone()),
("sp1", value.sp1.clone()),
("risc0", value.risc0.clone()),
]
.into_iter()
.filter_map(|(name, value)| value.map(|v| (name.to_string(), v))),
)
[
("native", value.native.clone()),
("sgx", value.sgx.clone()),
("sp1", value.sp1.clone()),
("risc0", value.risc0.clone()),
]
.into_iter()
.filter_map(|(name, value)| value.map(|v| (name.to_string(), v)))
.collect()
}
}

impl ProofRequestOpt {
/// Read a partial proof request config from a file.
pub fn from_file<T>(path: T) -> Result<Self, HostError>
pub fn from_file<T>(path: T) -> HostResult<Self>
where
T: AsRef<Path>,
{
@@ -240,7 +241,7 @@ impl ProofRequestOpt {
}

/// Merge a partial proof request into current one.
pub fn merge(&mut self, other: &Value) -> Result<(), HostError> {
pub fn merge(&mut self, other: &Value) -> HostResult<()> {
let mut this = serde_json::to_value(&self)?;
merge(&mut this, other);
*self = serde_json::from_value(this)?;
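The `From<ProverSpecificOpts>` impl is made generic over the hash builder (clippy's `implicit_hasher`), so the conversion also works for `HashMap`s with a non-default hasher, and `HashMap::from_iter(...)` becomes a plain `.collect()`. A reduced sketch of the same shape, with a two-field stand-in for the options struct:

```rust
use std::collections::HashMap;
use std::hash::BuildHasher;

/// Reduced stand-in for `ProverSpecificOpts`: optional per-prover config blobs.
struct Opts {
    native: Option<String>,
    sgx: Option<String>,
}

// Generic over `S` so the conversion also works for `HashMap`s built with a
// custom hasher, not only the default `RandomState`.
impl<S: BuildHasher + Default> From<Opts> for HashMap<String, String, S> {
    fn from(value: Opts) -> Self {
        [("native", value.native), ("sgx", value.sgx)]
            .into_iter()
            .filter_map(|(name, value)| value.map(|v| (name.to_string(), v)))
            .collect()
    }
}

fn main() {
    let opts = Opts {
        native: Some("{}".to_owned()),
        sgx: None,
    };
    // The default hasher still infers as before at the call site.
    let map: HashMap<String, String> = opts.into();
    assert_eq!(map.len(), 1);
    assert!(map.contains_key("native"));
}
```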
8 changes: 4 additions & 4 deletions host/src/rpc_provider.rs
@@ -45,7 +45,7 @@ impl BlockDataProvider for RpcBlockDataProvider {
let mut batch = self.client.new_batch();
let mut requests = Vec::with_capacity(max_batch_size);

for (block_number, full) in blocks_to_fetch.iter() {
for (block_number, full) in blocks_to_fetch {
requests.push(Box::pin(
batch
.add_call(
@@ -67,7 +67,7 @@

let mut blocks = Vec::with_capacity(max_batch_size);
// Collect the data from the batch
for request in requests.into_iter() {
for request in requests {
blocks.push(
request
.await
@@ -199,7 +199,7 @@ impl BlockDataProvider for RpcBlockDataProvider {

let mut values = Vec::with_capacity(max_batch_size);
// Collect the data from the batch
for request in requests.into_iter() {
for request in requests {
values.push(
request
.await
@@ -296,7 +296,7 @@
.map_err(|_| HostError::RPC("Error sending batch request".to_owned()))?;

// Collect the data from the batch
for request in requests.into_iter() {
for request in requests {
let mut proof = request
.await
.map_err(|_| HostError::RPC("Error collecting request data".to_owned()))?;
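These loops drop the explicit `.iter()` and `.into_iter()` calls (clippy's `explicit_iter_loop` and `explicit_into_iter_loop`); `for x in &collection` and `for x in collection` desugar to the same iterators but state the borrow-or-move intent directly. A minimal illustration with hypothetical data:

```rust
fn main() {
    let blocks_to_fetch: Vec<(u64, bool)> = vec![(1, true), (2, false)];

    // Borrowing loop: `for x in &v` is the idiomatic form of `for x in v.iter()`.
    for (block_number, full) in &blocks_to_fetch {
        println!("fetch block {block_number} (full bodies: {full})");
    }

    let requests: Vec<String> = vec!["req-1".to_owned(), "req-2".to_owned()];

    // Consuming loop: `for x in v` is the idiomatic form of `for x in v.into_iter()`.
    for request in requests {
        println!("collected {request}");
    }
}
```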
6 changes: 3 additions & 3 deletions host/src/server/api/proof.rs
@@ -45,14 +45,14 @@ fn set_cached_input(
cache_path: &Option<PathBuf>,
block_number: u64,
network: &str,
input: GuestInput,
input: &GuestInput,
) -> HostResult<()> {
if let Some(dir) = cache_path.as_ref() {
let path = get_input_path(dir, block_number, network);
if !path.exists() {
let file = File::create(&path).map_err(<std::io::Error as Into<HostError>>::into)?;
info!("caching input for {path:?}");
bincode::serialize_into(file, &input).map_err(|e| HostError::Anyhow(e.into()))?;
bincode::serialize_into(file, input).map_err(|e| HostError::Anyhow(e.into()))?;
}
}
Ok(())
@@ -162,7 +162,7 @@ async fn proof_handler(
&opts.cache_path,
proof_request.block_number,
&proof_request.network.to_string(),
input,
&input,
)
.map_err(|e| {
dec_current_req();
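`set_cached_input` now borrows the `GuestInput` instead of taking ownership (clippy's `needless_pass_by_value`), so the caller can keep using the input after caching it. A sketch of the shape of that change; the struct, cache-file layout, and debug-format serialization below are stand-ins for the real `GuestInput` and `bincode::serialize_into` call:

```rust
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};

/// Hypothetical stand-in for `GuestInput`.
#[derive(Debug)]
struct GuestInput {
    block_number: u64,
    network: String,
}

/// Borrowing the input means the caller can go on proving with it after caching;
/// taking it by value would force a move (or a clone) just to write a cache file.
fn set_cached_input(cache_dir: &Path, input: &GuestInput) -> std::io::Result<()> {
    let path: PathBuf = cache_dir.join(format!("{}-{}.cache", input.network, input.block_number));
    if !path.exists() {
        let mut file = File::create(&path)?;
        // The real code serializes with `bincode::serialize_into(file, input)`;
        // a debug dump keeps this sketch dependency-free.
        write!(file, "{input:?}")?;
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let input = GuestInput {
        block_number: 42,
        network: "testnet".to_owned(),
    };
    let cache_dir = std::env::temp_dir();
    set_cached_input(&cache_dir, &input)?;
    // `input` is still usable here because it was only borrowed.
    println!("cached input for block {}", input.block_number);
    Ok(())
}
```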
