Allow importing of historical blobs via HTTP API #6656

Open · wants to merge 2 commits into base: unstable (showing changes from 1 commit)
19 changes: 19 additions & 0 deletions beacon_node/http_api/src/lib.rs
@@ -4395,6 +4395,24 @@ pub fn serve<T: BeaconChainTypes>(
            },
        );

    // POST lighthouse/database/import_blobs
    let post_lighthouse_database_import_blobs = database_path
        .and(warp::path("import_blobs"))
        .and(warp::path::end())
        .and(warp_utils::json::json())
        .and(task_spawner_filter.clone())
        .and(chain_filter.clone())
        .then(
            |blobs, task_spawner: TaskSpawner<T::EthSpec>, chain: Arc<BeaconChain<T>>| {
                task_spawner.blocking_json_task(Priority::P1, move || {
                    match chain.store.import_historical_blobs(blobs) {
                        Ok(()) => Ok(()),
                        Err(e) => Err(warp_utils::reject::custom_server_error(format!("{e:?}"))),
                    }
                })
            },
        );

    // GET lighthouse/analysis/block_rewards
    let get_lighthouse_block_rewards = warp::path("lighthouse")
        .and(warp::path("analysis"))
@@ -4752,6 +4770,7 @@ pub fn serve<T: BeaconChainTypes>(
        .uor(post_validator_liveness_epoch)
        .uor(post_lighthouse_liveness)
        .uor(post_lighthouse_database_reconstruct)
        .uor(post_lighthouse_database_import_blobs)
        .uor(post_lighthouse_block_rewards)
        .uor(post_lighthouse_ui_validator_metrics)
        .uor(post_lighthouse_ui_validator_info)
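For anyone who wants to exercise the new endpoint against a running node, here is a minimal client sketch (not part of this diff). It assumes the default HTTP API address http://localhost:5052, the reqwest crate with its blocking and json features enabled, and serde_json; the placeholder payload stands in for real (block_root, blob_sidecar_list) pairs, which must deserialize as the `Vec<(Hash256, BlobSidecarList<E>)>` the handler expects.

// Client sketch: POST a JSON array of (block_root, blob_sidecar_list) pairs.
// The root below is a placeholder and the sidecar list is empty, so a real
// import would need genuine blob sidecar JSON for a block already in the DB.
fn import_blobs_example() -> Result<(), Box<dyn std::error::Error>> {
    let payload = serde_json::json!([
        ["0x0000000000000000000000000000000000000000000000000000000000000000", []]
    ]);
    let resp = reqwest::blocking::Client::new()
        .post("http://localhost:5052/lighthouse/database/import_blobs")
        .json(&payload)
        .send()?;
    println!("status: {}", resp.status());
    Ok(())
}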
1 change: 1 addition & 0 deletions beacon_node/store/src/errors.rs
@@ -59,6 +59,7 @@ pub enum Error {
    AddPayloadLogicError,
    InvalidKey,
    InvalidBytes,
    InvalidBlobImport(String),
    InconsistentFork(InconsistentFork),
    Hdiff(hdiff::Error),
    CacheBuildError(EpochCacheError),
44 changes: 44 additions & 0 deletions beacon_node/store/src/hot_cold_store.rs
@@ -43,6 +43,8 @@ use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidec
use types::*;
use zstd::{Decoder, Encoder};

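/// Number of (block_root, blobs) pairs written per atomic batch when importing historical blobs.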
const HISTORICAL_BLOB_BATCH_SIZE: usize = 1000;

/// On-disk database that stores finalized states efficiently.
///
/// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores
@@ -866,6 +868,48 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
        Ok(())
    }

    /// Import historical blobs.
    pub fn import_historical_blobs(
        &self,
        historical_blobs: Vec<(Hash256, BlobSidecarList<E>)>,
    ) -> Result<(), Error> {
        if historical_blobs.is_empty() {
            return Ok(());
        }

        let mut total_imported = 0;

        for chunk in historical_blobs.chunks(HISTORICAL_BLOB_BATCH_SIZE) {
            let mut ops = Vec::with_capacity(chunk.len());

            for (block_root, blobs) in chunk {
                // Verify block exists.
                if !self.block_exists(block_root)? {
                    warn!(
                        self.log,
                        "Skipping import of blobs; block root does not exist.";
                        "block_root" => ?block_root,
                        "num_blobs" => blobs.len(),
                    );
                    continue;
                }

                self.blobs_as_kv_store_ops(block_root, blobs.clone(), &mut ops);
                total_imported += blobs.len();
            }

            self.blobs_db.do_atomically(ops)?;
        }

        debug!(
            self.log,
            "Imported historical blobs.";
            "total_imported" => total_imported,
        );

        Ok(())
    }

    pub fn blobs_as_kv_store_ops(
        &self,
        key: &Hash256,
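A behavioural note the diff makes visible: writes are committed per chunk, so each `do_atomically` call persists the blobs for at most HISTORICAL_BLOB_BATCH_SIZE block roots, and pairs whose block root is unknown are skipped with a warning rather than aborting the whole import. A self-contained sketch of just the chunking arithmetic (standalone Rust, nothing Lighthouse-specific):

// Illustration of the batching: 2500 pairs commit as three atomic
// batches of 1000, 1000 and 500 entries respectively.
const HISTORICAL_BLOB_BATCH_SIZE: usize = 1000;

fn main() {
    let pairs: Vec<u32> = (0..2500).collect();
    let batch_sizes: Vec<usize> = pairs
        .chunks(HISTORICAL_BLOB_BATCH_SIZE)
        .map(|chunk| chunk.len())
        .collect();
    assert_eq!(batch_sizes, vec![1000, 1000, 500]);
}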