diff --git a/cl/antiquary/antiquary.go b/cl/antiquary/antiquary.go index 7f49882bfcd..806ccbf4bc2 100644 --- a/cl/antiquary/antiquary.go +++ b/cl/antiquary/antiquary.go @@ -50,6 +50,7 @@ type Antiquary struct { downloader proto_downloader.DownloaderClient logger log.Logger sn *freezeblocks.CaplinSnapshots + stateSn *freezeblocks.CaplinStateSnapshots snReader freezeblocks.BeaconSnapshotReader snBuildSema *semaphore.Weighted // semaphore for building only one type (blocks, caplin, v3) at a time ctx context.Context @@ -65,7 +66,7 @@ type Antiquary struct { balances32 []byte } -func NewAntiquary(ctx context.Context, blobStorage blob_storage.BlobStorage, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, logger log.Logger, states, blocks, blobs, snapgen bool, snBuildSema *semaphore.Weighted) *Antiquary { +func NewAntiquary(ctx context.Context, blobStorage blob_storage.BlobStorage, genesisState *state.CachingBeaconState, validatorsTable *state_accessors.StaticValidatorTable, cfg *clparams.BeaconChainConfig, dirs datadir.Dirs, downloader proto_downloader.DownloaderClient, mainDB kv.RwDB, stateSn *freezeblocks.CaplinStateSnapshots, sn *freezeblocks.CaplinSnapshots, reader freezeblocks.BeaconSnapshotReader, logger log.Logger, states, blocks, blobs, snapgen bool, snBuildSema *semaphore.Weighted) *Antiquary { backfilled := &atomic.Bool{} blobBackfilled := &atomic.Bool{} backfilled.Store(false) @@ -89,6 +90,7 @@ func NewAntiquary(ctx context.Context, blobStorage blob_storage.BlobStorage, gen blocks: blocks, blobs: blobs, snapgen: snapgen, + stateSn: stateSn, } } diff --git a/cl/antiquary/state_antiquary.go b/cl/antiquary/state_antiquary.go index 7344fb52a89..24d04604f81 100644 --- a/cl/antiquary/state_antiquary.go +++ b/cl/antiquary/state_antiquary.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "fmt" + "runtime" "sync" "time" @@ -27,6 +28,7 @@ import ( "github.com/erigontech/erigon-lib/common" libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/downloader/snaptype" "github.com/erigontech/erigon-lib/etl" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon-lib/log/v3" @@ -42,6 +44,7 @@ import ( "github.com/erigontech/erigon/cl/phase1/core/state/raw" "github.com/erigontech/erigon/cl/transition" "github.com/erigontech/erigon/cl/transition/impl/eth2" + "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" ) // pool for buffers @@ -111,6 +114,9 @@ func (s *Antiquary) readHistoricalProcessingProgress(ctx context.Context) (progr if err != nil { return } + if s.stateSn != nil { + progress = max(progress, s.stateSn.BlocksAvailable()) + } finalized, err = beacon_indicies.ReadHighestFinalized(tx) if err != nil { @@ -119,8 +125,68 @@ func (s *Antiquary) readHistoricalProcessingProgress(ctx context.Context) (progr return } +func FillStaticValidatorsTableIfNeeded(ctx context.Context, logger log.Logger, stateSn *freezeblocks.CaplinStateSnapshots, validatorsTable *state_accessors.StaticValidatorTable) (bool, error) { + if stateSn == nil || validatorsTable.Slot() != 0 { + return false, nil + } + if err := stateSn.OpenFolder(); err != nil { + return false, err + } + blocksAvailable := stateSn.BlocksAvailable() + stateSnRoTx := stateSn.View() + defer stateSnRoTx.Close() + + start := time.Now() + for slot := uint64(0); slot <= blocksAvailable; slot++ { + seg, ok := stateSnRoTx.VisibleSegment(slot, kv.StateEvents) + if !ok { + return false, fmt.Errorf("segment not found for slot %d", slot) + } + buf, err := seg.Get(slot) + if err != nil { + return false, err + } + if len(buf) == 0 { + continue + } + event := state_accessors.NewStateEventsFromBytes(buf) + state_accessors.ReplayEvents( + func(validatorIndex uint64, validator solid.Validator) error { + return validatorsTable.AddValidator(validator, validatorIndex, slot) + }, + func(validatorIndex uint64, exitEpoch uint64) error { + return validatorsTable.AddExitEpoch(validatorIndex, slot, exitEpoch) + }, + func(validatorIndex uint64, withdrawableEpoch uint64) error { + return validatorsTable.AddWithdrawableEpoch(validatorIndex, slot, withdrawableEpoch) + }, + func(validatorIndex uint64, withdrawalCredentials libcommon.Hash) error { + return validatorsTable.AddWithdrawalCredentials(validatorIndex, slot, withdrawalCredentials) + }, + func(validatorIndex uint64, activationEpoch uint64) error { + return validatorsTable.AddActivationEpoch(validatorIndex, slot, activationEpoch) + }, + func(validatorIndex uint64, activationEligibilityEpoch uint64) error { + return validatorsTable.AddActivationEligibility(validatorIndex, slot, activationEligibilityEpoch) + }, + func(validatorIndex uint64, slashed bool) error { + return validatorsTable.AddSlashed(validatorIndex, slot, slashed) + }, + event, + ) + validatorsTable.SetSlot(slot) + } + logger.Info("[Antiquary] Filled static validators table", "slots", blocksAvailable, "elapsed", time.Since(start)) + return true, nil +} + func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { - var tx kv.Tx + + // Check if we need to fill the static validators table + refilledStaticValidators, err := FillStaticValidatorsTableIfNeeded(ctx, s.logger, s.stateSn, s.validatorsTable) + if err != nil { + return err + } tx, err := s.mainDB.BeginRo(ctx) if err != nil { @@ -129,7 +195,14 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { defer tx.Rollback() // maps which validators changes - changedValidators := make(map[uint64]struct{}) + var changedValidators sync.Map + + if refilledStaticValidators { + s.validatorsTable.ForEach(func(validatorIndex uint64, validator *state_accessors.StaticValidator) bool { + changedValidators.Store(validatorIndex, struct{}{}) + return true + }) + } stateAntiquaryCollector := newBeaconStatesCollector(s.cfg, s.dirs.Tmp, s.logger) defer stateAntiquaryCollector.close() @@ -144,7 +217,7 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { } // Mark all validators as touched because we just initizialized the whole state. 
s.currentState.ForEachValidator(func(v solid.Validator, index, total int) bool { - changedValidators[uint64(index)] = struct{}{} + changedValidators.Store(uint64(index), struct{}{}) if err = s.validatorsTable.AddValidator(v, uint64(index), 0); err != nil { return false } @@ -175,37 +248,37 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return stateAntiquaryCollector.collectIntraEpochRandaoMix(slot, mix) }, OnNewValidator: func(index int, v solid.Validator, balance uint64) error { - changedValidators[uint64(index)] = struct{}{} + changedValidators.Store(uint64(index), struct{}{}) events.AddValidator(uint64(index), v) return s.validatorsTable.AddValidator(v, uint64(index), slot) }, OnNewValidatorActivationEpoch: func(index int, epoch uint64) error { - changedValidators[uint64(index)] = struct{}{} + changedValidators.Store(uint64(index), struct{}{}) events.ChangeActivationEpoch(uint64(index), epoch) return s.validatorsTable.AddActivationEpoch(uint64(index), slot, epoch) }, OnNewValidatorExitEpoch: func(index int, epoch uint64) error { - changedValidators[uint64(index)] = struct{}{} + changedValidators.Store(uint64(index), struct{}{}) events.ChangeExitEpoch(uint64(index), epoch) return s.validatorsTable.AddExitEpoch(uint64(index), slot, epoch) }, OnNewValidatorWithdrawableEpoch: func(index int, epoch uint64) error { - changedValidators[uint64(index)] = struct{}{} + changedValidators.Store(uint64(index), struct{}{}) events.ChangeWithdrawableEpoch(uint64(index), epoch) return s.validatorsTable.AddWithdrawableEpoch(uint64(index), slot, epoch) }, OnNewValidatorSlashed: func(index int, newSlashed bool) error { - changedValidators[uint64(index)] = struct{}{} + changedValidators.Store(uint64(index), struct{}{}) events.ChangeSlashed(uint64(index), newSlashed) return s.validatorsTable.AddSlashed(uint64(index), slot, newSlashed) }, OnNewValidatorActivationEligibilityEpoch: func(index int, epoch uint64) error { - changedValidators[uint64(index)] = struct{}{} + changedValidators.Store(uint64(index), struct{}{}) events.ChangeActivationEligibilityEpoch(uint64(index), epoch) return s.validatorsTable.AddActivationEligibility(uint64(index), slot, epoch) }, OnNewValidatorWithdrawalCredentials: func(index int, wc []byte) error { - changedValidators[uint64(index)] = struct{}{} + changedValidators.Store(uint64(index), struct{}{}) events.ChangeWithdrawalCredentials(uint64(index), libcommon.BytesToHash(wc)) return s.validatorsTable.AddWithdrawalCredentials(uint64(index), slot, libcommon.BytesToHash(wc)) }, @@ -389,7 +462,7 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { buf := &bytes.Buffer{} s.validatorsTable.ForEach(func(validatorIndex uint64, validator *state_accessors.StaticValidator) bool { - if _, ok := changedValidators[validatorIndex]; !ok { + if _, ok := changedValidators.Load(validatorIndex); !ok { return true } buf.Reset() @@ -413,6 +486,41 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error { return err } log.Info("Historical states antiquated", "slot", s.currentState.Slot(), "root", libcommon.Hash(stateRoot), "latency", endTime) + if s.snapgen { + if err := s.stateSn.OpenFolder(); err != nil { + return err + } + blocksPerStatefulFile := uint64(snaptype.CaplinMergeLimit * 5) + from := s.stateSn.BlocksAvailable() + 1 + if from+blocksPerStatefulFile+safetyMargin > s.currentState.Slot() { + return nil + } + to := s.currentState.Slot() + if to < (safetyMargin + blocksPerStatefulFile) { + return nil + } + to 
= to - (safetyMargin + blocksPerStatefulFile) + if from >= to { + return nil + } + if err := s.stateSn.DumpCaplinState( + ctx, + from, + to, + blocksPerStatefulFile, + s.sn.Salt, + s.dirs, + runtime.NumCPU(), + log.LvlInfo, + s.logger, + ); err != nil { + return err + } + if err := s.stateSn.OpenFolder(); err != nil { + return err + } + } + return nil } @@ -439,12 +547,15 @@ func (s *Antiquary) initializeStateAntiquaryIfNeeded(ctx context.Context, tx kv. if err != nil { return err } + if s.stateSn != nil { + targetSlot = max(targetSlot, s.stateSn.BlocksAvailable()) + } // We want to backoff by some slots until we get a correct state from DB. // we start from 10 * clparams.SlotsPerDump. backoffStrides := uint64(10) backoffStep := backoffStrides - historicalReader := historical_states_reader.NewHistoricalStatesReader(s.cfg, s.snReader, s.validatorsTable, s.genesisState) + historicalReader := historical_states_reader.NewHistoricalStatesReader(s.cfg, s.snReader, s.validatorsTable, s.genesisState, s.stateSn) for { attempt, err := computeSlotToBeRequested(tx, s.cfg, s.genesisState.Slot(), targetSlot, backoffStep) @@ -465,6 +576,7 @@ func (s *Antiquary) initializeStateAntiquaryIfNeeded(ctx context.Context, tx kv. if err != nil { return fmt.Errorf("failed to read historical state at slot %d: %w", attempt, err) } + s.logger.Debug("read historical state", "slot", attempt) if s.currentState == nil { log.Warn("historical state not found, backoff more and try again", "slot", attempt) backoffStep += backoffStrides diff --git a/cl/beacon/handler/attestation_rewards.go b/cl/beacon/handler/attestation_rewards.go index 364340a6e4c..fa88d1ad9c5 100644 --- a/cl/beacon/handler/attestation_rewards.go +++ b/cl/beacon/handler/attestation_rewards.go @@ -178,13 +178,17 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r if lastSlot > stateProgress { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("requested range is not yet processed or the node is not archivial")) } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() - epochData, err := state_accessors.ReadEpochData(tx, a.beaconChainCfg.RoundSlotToEpoch(lastSlot)) + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + + epochData, err := state_accessors.ReadEpochData(stateGetter, a.beaconChainCfg.RoundSlotToEpoch(lastSlot)) if err != nil { return nil, err } - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, lastSlot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, stateGetter, lastSlot) if err != nil { return nil, err } @@ -192,12 +196,12 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("no validator set found for this epoch")) } - _, previousIdx, err := a.stateReader.ReadParticipations(tx, lastSlot) + _, previousIdx, err := a.stateReader.ReadParticipations(tx, stateGetter, lastSlot) if err != nil { return nil, err } - _, _, finalizedCheckpoint, ok, err := state_accessors.ReadCheckpoints(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + _, _, finalizedCheckpoint, ok, err := state_accessors.ReadCheckpoints(stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } @@ -212,7 +216,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsAttestations(w http.ResponseWriter, r return resp.WithFinalized(true).WithOptimistic(a.forkchoiceStore.IsRootOptimistic(root)), nil } inactivityScores := 
solid.NewUint64ListSSZ(int(a.beaconChainCfg.ValidatorRegistryLimit)) - if err := a.stateReader.ReconstructUint64ListDump(tx, lastSlot, kv.InactivityScores, validatorSet.Length(), inactivityScores); err != nil { + if err := a.stateReader.ReconstructUint64ListDump(stateGetter, lastSlot, kv.InactivityScores, validatorSet.Length(), inactivityScores); err != nil { return nil, err } resp, err := a.computeAttestationsRewardsForAltair( diff --git a/cl/beacon/handler/committees.go b/cl/beacon/handler/committees.go index 8c2db31ffea..35516f4c314 100644 --- a/cl/beacon/handler/committees.go +++ b/cl/beacon/handler/committees.go @@ -119,8 +119,13 @@ func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*bea } return newBeaconResponse(resp).WithFinalized(isFinalized).WithOptimistic(isOptimistic), nil } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) // finality case - activeIdxs, err := state_accessors.ReadActiveIndicies(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + activeIdxs, err := state_accessors.ReadActiveIndicies( + stateGetter, + epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } @@ -134,7 +139,7 @@ func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*bea } mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % a.beaconChainCfg.EpochsPerHistoricalVector - mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read randao mix: %v", err)) } diff --git a/cl/beacon/handler/duties_attester.go b/cl/beacon/handler/duties_attester.go index 604807e50ba..8c0ed8452be 100644 --- a/cl/beacon/handler/duties_attester.go +++ b/cl/beacon/handler/duties_attester.go @@ -149,8 +149,15 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) ( if (epoch)*a.beaconChainCfg.SlotsPerEpoch >= stageStateProgress { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch)) } + + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) // finality case - activeIdxs, err := state_accessors.ReadActiveIndicies(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + activeIdxs, err := state_accessors.ReadActiveIndicies( + stateGetter, + epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } @@ -164,7 +171,7 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) ( } mixPosition := (epoch + a.beaconChainCfg.EpochsPerHistoricalVector - a.beaconChainCfg.MinSeedLookahead - 1) % a.beaconChainCfg.EpochsPerHistoricalVector - mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch, mixPosition) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read randao mix: %v", err)) } diff --git a/cl/beacon/handler/duties_sync.go b/cl/beacon/handler/duties_sync.go index 024fd6d45e5..bc4e7cc082a 100644 --- a/cl/beacon/handler/duties_sync.go +++ b/cl/beacon/handler/duties_sync.go @@ 
-81,9 +81,13 @@ func (a *ApiHandler) getSyncDuties(w http.ResponseWriter, r *http.Request) (*bea if !ok { _, syncCommittee, ok = a.forkchoiceStore.GetSyncCommittees(period - 1) } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() // Read them from the archive node if we do not have them in the fast-access storage if !ok { - syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(startSlotAtEpoch)) + syncCommittee, err = state_accessors.ReadCurrentSyncCommittee( + state_accessors.GetValFnTxAndSnapshot(tx, snRoTx), + a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(startSlotAtEpoch)) if syncCommittee == nil { log.Warn("could not find sync committee for epoch", "epoch", epoch, "period", period) return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not find sync committee for epoch %d", epoch)) diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go index 093fa6ada1b..fe2e1be10fe 100644 --- a/cl/beacon/handler/handler.go +++ b/cl/beacon/handler/handler.go @@ -63,18 +63,19 @@ type ApiHandler struct { o sync.Once mux *chi.Mux - blockReader freezeblocks.BeaconSnapshotReader - indiciesDB kv.RwDB - netConfig *clparams.NetworkConfig - ethClock eth_clock.EthereumClock - beaconChainCfg *clparams.BeaconChainConfig - forkchoiceStore forkchoice.ForkChoiceStorage - operationsPool pool.OperationsPool - syncedData synced_data.SyncedData - stateReader *historical_states_reader.HistoricalStatesReader - sentinel sentinel.SentinelClient - blobStoage blob_storage.BlobStorage - caplinSnapshots *freezeblocks.CaplinSnapshots + blockReader freezeblocks.BeaconSnapshotReader + indiciesDB kv.RwDB + netConfig *clparams.NetworkConfig + ethClock eth_clock.EthereumClock + beaconChainCfg *clparams.BeaconChainConfig + forkchoiceStore forkchoice.ForkChoiceStorage + operationsPool pool.OperationsPool + syncedData synced_data.SyncedData + stateReader *historical_states_reader.HistoricalStatesReader + sentinel sentinel.SentinelClient + blobStoage blob_storage.BlobStorage + caplinSnapshots *freezeblocks.CaplinSnapshots + caplinStateSnapshots *freezeblocks.CaplinStateSnapshots version string // Node's version @@ -141,24 +142,26 @@ func NewApiHandler( proposerSlashingService services.ProposerSlashingService, builderClient builder.BuilderClient, validatorMonitor monitor.ValidatorMonitor, + caplinStateSnapshots *freezeblocks.CaplinStateSnapshots, ) *ApiHandler { blobBundles, err := lru.New[common.Bytes48, BlobBundle]("blobs", maxBlobBundleCacheSize) if err != nil { panic(err) } return &ApiHandler{ - logger: logger, - validatorParams: validatorParams, - o: sync.Once{}, - netConfig: netConfig, - ethClock: ethClock, - beaconChainCfg: beaconChainConfig, - indiciesDB: indiciesDB, - forkchoiceStore: forkchoiceStore, - operationsPool: operationsPool, - blockReader: rcsn, - syncedData: syncedData, - stateReader: stateReader, + logger: logger, + validatorParams: validatorParams, + o: sync.Once{}, + netConfig: netConfig, + ethClock: ethClock, + beaconChainCfg: beaconChainConfig, + indiciesDB: indiciesDB, + forkchoiceStore: forkchoiceStore, + operationsPool: operationsPool, + blockReader: rcsn, + syncedData: syncedData, + stateReader: stateReader, + caplinStateSnapshots: caplinStateSnapshots, randaoMixesPool: sync.Pool{New: func() interface{} { return solid.NewHashVector(int(beaconChainConfig.EpochsPerHistoricalVector)) }}, diff --git a/cl/beacon/handler/lighthouse.go b/cl/beacon/handler/lighthouse.go index 612cb31e480..f2e978f5e56 
100644 --- a/cl/beacon/handler/lighthouse.go +++ b/cl/beacon/handler/lighthouse.go @@ -76,6 +76,10 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter } defer tx.Rollback() + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + slot := epoch * a.beaconChainCfg.SlotsPerEpoch if slot >= a.forkchoiceStore.LowestAvailableSlot() { // Take data from forkchoice @@ -120,29 +124,30 @@ func (a *ApiHandler) GetLighthouseValidatorInclusionGlobal(w http.ResponseWriter } // read the epoch datas first - epochData, err := state_accessors.ReadEpochData(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + epochData, err := state_accessors.ReadEpochData(stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if epochData == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for current epoch")) } - prevEpochData, err := state_accessors.ReadEpochData(tx, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) + prevEpochData, err := state_accessors.ReadEpochData(stateGetter, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if prevEpochData == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for previous epoch")) } + // read the validator set - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, slot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, stateGetter, slot) if err != nil { return nil, err } if validatorSet == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } - currentEpochParticipation, previousEpochParticipation, err := a.stateReader.ReadParticipations(tx, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) + currentEpochParticipation, previousEpochParticipation, err := a.stateReader.ReadParticipations(tx, stateGetter, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) if err != nil { return nil, err } @@ -277,15 +282,18 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h return newBeaconResponse(a.computeLighthouseValidatorInclusion(int(validatorIndex), prevEpoch, epoch, activeBalance, prevActiveBalance, validatorSet, currentEpochParticipation, previousEpochParticipation)), nil } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) // read the epoch datas first - epochData, err := state_accessors.ReadEpochData(tx, epoch*a.beaconChainCfg.SlotsPerEpoch) + epochData, err := state_accessors.ReadEpochData(stateGetter, epoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } if epochData == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for current epoch")) } - prevEpochData, err := state_accessors.ReadEpochData(tx, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) + prevEpochData, err := state_accessors.ReadEpochData(stateGetter, prevEpoch*a.beaconChainCfg.SlotsPerEpoch) if err != nil { return nil, err } @@ -293,14 +301,14 @@ func (a *ApiHandler) GetLighthouseValidatorInclusion(w http.ResponseWriter, r *h return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("epoch data not found for previous epoch")) } // read the validator set - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, slot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, 
stateGetter, slot) if err != nil { return nil, err } if validatorSet == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validator set not found for current epoch")) } - currentEpochParticipation, previousEpochParticipation, err := a.stateReader.ReadParticipations(tx, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) + currentEpochParticipation, previousEpochParticipation, err := a.stateReader.ReadParticipations(tx, stateGetter, slot+(a.beaconChainCfg.SlotsPerEpoch-1)) if err != nil { return nil, err } diff --git a/cl/beacon/handler/liveness.go b/cl/beacon/handler/liveness.go index ccce105a571..f81d2e74621 100644 --- a/cl/beacon/handler/liveness.go +++ b/cl/beacon/handler/liveness.go @@ -28,6 +28,7 @@ import ( "github.com/erigontech/erigon/cl/beacon/beaconhttp" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" + state_accessors "github.com/erigontech/erigon/cl/persistence/state" ) type live struct { @@ -138,11 +139,15 @@ func (a *ApiHandler) obtainCurrentEpochParticipationFromEpoch(tx kv.Tx, epoch ui if epoch > 0 { prevEpoch-- } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) currParticipation, ok1 := a.forkchoiceStore.Participation(epoch) prevParticipation, ok2 := a.forkchoiceStore.Participation(prevEpoch) if !ok1 || !ok2 { - return a.stateReader.ReadParticipations(tx, blockSlot) + return a.stateReader.ReadParticipations(tx, stateGetter, blockSlot) } return currParticipation, prevParticipation, nil diff --git a/cl/beacon/handler/rewards.go b/cl/beacon/handler/rewards.go index bec4923de39..6a302207020 100644 --- a/cl/beacon/handler/rewards.go +++ b/cl/beacon/handler/rewards.go @@ -81,7 +81,11 @@ func (a *ApiHandler) GetEthV1BeaconRewardsBlocks(w http.ResponseWriter, r *http. 
Total: blkRewards.Attestations + blkRewards.ProposerSlashings + blkRewards.AttesterSlashings + blkRewards.SyncAggregate, }).WithFinalized(isFinalized).WithOptimistic(isOptimistic), nil } - slotData, err := state_accessors.ReadSlotData(tx, slot) + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + slotData, err := state_accessors.ReadSlotData(stateGetter, slot) if err != nil { return nil, err } @@ -165,11 +169,15 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, syncCommittee *solid.SyncCommittee totalActiveBalance uint64 ) + + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + getter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) if slot < a.forkchoiceStore.LowestAvailableSlot() { if !isCanonical { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("non-canonical finalized block not found")) } - epochData, err := state_accessors.ReadEpochData(tx, a.beaconChainCfg.RoundSlotToEpoch(blk.Block.Slot)) + epochData, err := state_accessors.ReadEpochData(getter, a.beaconChainCfg.RoundSlotToEpoch(blk.Block.Slot)) if err != nil { return nil, err } @@ -177,7 +185,7 @@ func (a *ApiHandler) PostEthV1BeaconRewardsSyncCommittees(w http.ResponseWriter, return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("could not read historical sync committee rewards, node may not be archive or it still processing historical states")) } totalActiveBalance = epochData.TotalActiveBalance - syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(blk.Block.Slot)) + syncCommittee, err = state_accessors.ReadCurrentSyncCommittee(getter, a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(blk.Block.Slot)) if err != nil { return nil, err } diff --git a/cl/beacon/handler/states.go b/cl/beacon/handler/states.go index 011a9e43687..751f1e8867f 100644 --- a/cl/beacon/handler/states.go +++ b/cl/beacon/handler/states.go @@ -262,8 +262,13 @@ func (a *ApiHandler) getFinalityCheckpoints(w http.ResponseWriter, r *http.Reque if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err) } + + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) if !ok { - currentJustifiedCheckpoint, previousJustifiedCheckpoint, finalizedCheckpoint, ok, err = state_accessors.ReadCheckpoints(tx, a.beaconChainCfg.RoundSlotToEpoch(*slot)) + currentJustifiedCheckpoint, previousJustifiedCheckpoint, finalizedCheckpoint, ok, err = state_accessors.ReadCheckpoints(stateGetter, a.beaconChainCfg.RoundSlotToEpoch(*slot)) if err != nil { return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, err) } @@ -317,16 +322,21 @@ func (a *ApiHandler) getSyncCommittees(w http.ResponseWriter, r *http.Request) ( return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read block slot: %x", blockRoot)) } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + // Code here currentSyncCommittee, nextSyncCommittee, ok := a.forkchoiceStore.GetSyncCommittees(a.beaconChainCfg.SyncCommitteePeriod(*slot)) if !ok { syncCommitteeSlot := a.beaconChainCfg.RoundSlotToSyncCommitteePeriod(*slot) // Check the main database if it cannot be found in the forkchoice store - currentSyncCommittee, err = state_accessors.ReadCurrentSyncCommittee(tx, syncCommitteeSlot) + 
currentSyncCommittee, err = state_accessors.ReadCurrentSyncCommittee(stateGetter, syncCommitteeSlot) if err != nil { return nil, err } - nextSyncCommittee, err = state_accessors.ReadNextSyncCommittee(tx, syncCommitteeSlot) + nextSyncCommittee, err = state_accessors.ReadNextSyncCommittee(stateGetter, syncCommitteeSlot) if err != nil { return nil, err } @@ -441,7 +451,11 @@ func (a *ApiHandler) getRandao(w http.ResponseWriter, r *http.Request) (*beaconh if canonicalRoot != blockRoot { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, fmt.Errorf("could not read randao: %x", blockRoot)) } - mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, slot, epoch%a.beaconChainCfg.EpochsPerHistoricalVector) + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + stateGetter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + mix, err := a.stateReader.ReadRandaoMixBySlotAndIndex(tx, stateGetter, slot, epoch%a.beaconChainCfg.EpochsPerHistoricalVector) if err != nil { return nil, err } diff --git a/cl/beacon/handler/utils_test.go b/cl/beacon/handler/utils_test.go index f480f849b2f..a1c0e0f8ddf 100644 --- a/cl/beacon/handler/utils_test.go +++ b/cl/beacon/handler/utils_test.go @@ -78,10 +78,10 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() - a := antiquary.NewAntiquary(ctx, nil, preState, vt, &bcfg, datadir.New("/tmp"), nil, db, nil, reader, logger, true, true, false, false, nil) + a := antiquary.NewAntiquary(ctx, nil, preState, vt, &bcfg, datadir.New("/tmp"), nil, db, nil, nil, reader, logger, true, true, false, false, nil) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) // historical states reader below - statesReader := historical_states_reader.NewHistoricalStatesReader(&bcfg, reader, vt, preState) + statesReader := historical_states_reader.NewHistoricalStatesReader(&bcfg, reader, vt, preState, nil) opPool = pool.NewOperationsPool(&bcfg) fcu.Pool = opPool @@ -176,6 +176,7 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge proposerSlashingService, nil, mockValidatorMonitor, + nil, ) // TODO: add tests h.Init() return diff --git a/cl/beacon/handler/validator_test.go b/cl/beacon/handler/validator_test.go index 533e45b3bef..5a1979e58c3 100644 --- a/cl/beacon/handler/validator_test.go +++ b/cl/beacon/handler/validator_test.go @@ -75,6 +75,7 @@ func (t *validatorTestSuite) SetupTest() { nil, nil, nil, + nil, ) t.gomockCtrl = gomockCtrl } diff --git a/cl/beacon/handler/validators.go b/cl/beacon/handler/validators.go index 9b76e6d24e3..39e20896d88 100644 --- a/cl/beacon/handler/validators.go +++ b/cl/beacon/handler/validators.go @@ -338,8 +338,13 @@ func (a *ApiHandler) writeValidatorsResponse( } stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + getter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + if *slot < a.forkchoiceStore.LowestAvailableSlot() { - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, *slot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, getter, *slot) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -347,7 +352,7 @@ func (a *ApiHandler) writeValidatorsResponse( http.Error(w, fmt.Errorf("state not found for slot %v", *slot).Error(), http.StatusNotFound) return } - balances, err := a.stateReader.ReadValidatorsBalances(tx, 
*slot) + balances, err := a.stateReader.ReadValidatorsBalances(tx, getter, *slot) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -454,6 +459,11 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt return nil, err } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + getter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + if blockId.Head() { // Lets see if we point to head, if yes then we need to look at the head state we always keep. s := a.syncedData.HeadState() if s == nil { @@ -475,14 +485,14 @@ func (a *ApiHandler) GetEthV1BeaconStatesValidator(w http.ResponseWriter, r *htt stateEpoch := *slot / a.beaconChainCfg.SlotsPerEpoch if *slot < a.forkchoiceStore.LowestAvailableSlot() { - validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, *slot) + validatorSet, err := a.stateReader.ReadValidatorsForHistoricalState(tx, getter, *slot) if err != nil { return nil, err } if validatorSet == nil { return nil, beaconhttp.NewEndpointError(http.StatusNotFound, errors.New("validators not found")) } - balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + balances, err := a.stateReader.ReadValidatorsBalances(tx, getter, *slot) if err != nil { return nil, err } @@ -595,8 +605,13 @@ func (a *ApiHandler) getValidatorBalances(ctx context.Context, w http.ResponseWr return } + snRoTx := a.caplinStateSnapshots.View() + defer snRoTx.Close() + + getter := state_accessors.GetValFnTxAndSnapshot(tx, snRoTx) + if *slot < a.forkchoiceStore.LowestAvailableSlot() { - balances, err := a.stateReader.ReadValidatorsBalances(tx, *slot) + balances, err := a.stateReader.ReadValidatorsBalances(tx, getter, *slot) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/cl/persistence/state/historical_states_reader/attesting_indicies.go b/cl/persistence/state/historical_states_reader/attesting_indicies.go index f0ef90b286c..9da5eb8be79 100644 --- a/cl/persistence/state/historical_states_reader/attesting_indicies.go +++ b/cl/persistence/state/historical_states_reader/attesting_indicies.go @@ -19,13 +19,11 @@ package historical_states_reader import ( "errors" "fmt" - "time" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon/cl/clparams" "github.com/erigontech/erigon/cl/cltypes/solid" - "github.com/erigontech/erigon/cl/monitor/shuffling_metrics" "github.com/erigontech/erigon/cl/persistence/base_encoding" state_accessors "github.com/erigontech/erigon/cl/persistence/state" "github.com/erigontech/erigon/cl/phase1/core/state/shuffling" @@ -104,19 +102,9 @@ func (r *HistoricalStatesReader) ComputeCommittee(mix libcommon.Hash, indicies [ start := (lenIndicies * index) / count end := (lenIndicies * (index + 1)) / count var shuffledIndicies []uint64 - epoch := slot / cfg.SlotsPerEpoch - /* - mixPosition := (epoch + cfg.EpochsPerHistoricalVector - cfg.MinSeedLookahead - 1) % cfg.EpochsPerHistoricalVector - */ - if shuffledIndicesInterface, ok := r.shuffledSetsCache.Get(epoch); ok { - shuffledIndicies = shuffledIndicesInterface - } else { - shuffledIndicies = make([]uint64, lenIndicies) - start := time.Now() - shuffledIndicies = shuffling.ComputeShuffledIndicies(cfg, mix, shuffledIndicies, indicies, slot) - shuffling_metrics.ObserveComputeShuffledIndiciesTime(start) - r.shuffledSetsCache.Add(epoch, shuffledIndicies) - } + + shuffledIndicies = make([]uint64, lenIndicies) + shuffledIndicies = 
shuffling.ComputeShuffledIndicies(cfg, mix, shuffledIndicies, indicies, slot) return shuffledIndicies[start:end], nil } @@ -132,7 +120,7 @@ func committeeCount(cfg *clparams.BeaconChainConfig, epoch uint64, idxs []uint64 return committeCount } -func (r *HistoricalStatesReader) readHistoricalBlockRoot(tx kv.Tx, slot, index uint64) (libcommon.Hash, error) { +func (r *HistoricalStatesReader) readHistoricalBlockRoot(kvGetter state_accessors.GetValFn, slot, index uint64) (libcommon.Hash, error) { slotSubIndex := slot % r.cfg.SlotsPerHistoricalRoot needFromGenesis := true @@ -152,7 +140,7 @@ func (r *HistoricalStatesReader) readHistoricalBlockRoot(tx kv.Tx, slot, index u if needFromGenesis { return r.genesisState.GetBlockRootAtSlot(slot) } - br, err := tx.GetOne(kv.BlockRoot, base_encoding.Encode64ToBytes4(slotLookup)) + br, err := kvGetter(kv.BlockRoot, base_encoding.Encode64ToBytes4(slotLookup)) if err != nil { return libcommon.Hash{}, err } @@ -163,8 +151,9 @@ func (r *HistoricalStatesReader) readHistoricalBlockRoot(tx kv.Tx, slot, index u } -func (r *HistoricalStatesReader) getAttestationParticipationFlagIndicies(tx kv.Tx, version clparams.StateVersion, stateSlot uint64, data solid.AttestationData, inclusionDelay uint64, skipAssert bool) ([]uint8, error) { - currentCheckpoint, previousCheckpoint, _, ok, err := state_accessors.ReadCheckpoints(tx, r.cfg.RoundSlotToEpoch(stateSlot)) +func (r *HistoricalStatesReader) getAttestationParticipationFlagIndicies(tx kv.Tx, getter state_accessors.GetValFn, version clparams.StateVersion, stateSlot uint64, data solid.AttestationData, inclusionDelay uint64, skipAssert bool) ([]uint8, error) { + + currentCheckpoint, previousCheckpoint, _, ok, err := state_accessors.ReadCheckpoints(getter, r.cfg.RoundSlotToEpoch(stateSlot)) if err != nil { return nil, err } @@ -186,13 +175,13 @@ func (r *HistoricalStatesReader) getAttestationParticipationFlagIndicies(tx kv.T return nil, errors.New("GetAttestationParticipationFlagIndicies: source does not match.") } i := (data.Target.Epoch * r.cfg.SlotsPerEpoch) % r.cfg.SlotsPerHistoricalRoot - targetRoot, err := r.readHistoricalBlockRoot(tx, stateSlot, i) + targetRoot, err := r.readHistoricalBlockRoot(getter, stateSlot, i) if err != nil { return nil, err } i = data.Slot % r.cfg.SlotsPerHistoricalRoot - headRoot, err := r.readHistoricalBlockRoot(tx, stateSlot, i) + headRoot, err := r.readHistoricalBlockRoot(getter, stateSlot, i) if err != nil { return nil, err } diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader.go b/cl/persistence/state/historical_states_reader/historical_states_reader.go index d1cd90c670c..1b50b23ccb3 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader.go @@ -33,7 +33,6 @@ import ( "github.com/erigontech/erigon/cl/persistence/base_encoding" state_accessors "github.com/erigontech/erigon/cl/persistence/state" "github.com/erigontech/erigon/cl/phase1/core/state" - "github.com/erigontech/erigon/cl/phase1/core/state/lru" "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" "github.com/klauspost/compress/zstd" ) @@ -46,39 +45,43 @@ type HistoricalStatesReader struct { cfg *clparams.BeaconChainConfig validatorTable *state_accessors.StaticValidatorTable // We can save 80% of the I/O by caching the validator table blockReader freezeblocks.BeaconSnapshotReader + stateSn *freezeblocks.CaplinStateSnapshots genesisState *state.CachingBeaconState - - // cache for 
shuffled sets - shuffledSetsCache *lru.Cache[uint64, []uint64] } func NewHistoricalStatesReader( cfg *clparams.BeaconChainConfig, blockReader freezeblocks.BeaconSnapshotReader, validatorTable *state_accessors.StaticValidatorTable, - genesisState *state.CachingBeaconState) *HistoricalStatesReader { - - cache, err := lru.New[uint64, []uint64]("shuffledSetsCache_reader", 125) - if err != nil { - panic(err) - } + genesisState *state.CachingBeaconState, stateSn *freezeblocks.CaplinStateSnapshots) *HistoricalStatesReader { return &HistoricalStatesReader{ - cfg: cfg, - blockReader: blockReader, - genesisState: genesisState, - validatorTable: validatorTable, - shuffledSetsCache: cache, + cfg: cfg, + blockReader: blockReader, + genesisState: genesisState, + validatorTable: validatorTable, + stateSn: stateSn, } } func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv.Tx, slot uint64) (*state.CachingBeaconState, error) { + snapshotView := r.stateSn.View() + defer snapshotView.Close() + + kvGetter := state_accessors.GetValFnTxAndSnapshot(tx, snapshotView) + ret := state.New(r.cfg) latestProcessedState, err := state_accessors.GetStateProcessingProgress(tx) if err != nil { return nil, err } + var blocksAvailableInSnapshots uint64 + if r.stateSn != nil { + blocksAvailableInSnapshots = r.stateSn.BlocksAvailable() + } + latestProcessedState = max(latestProcessedState, blocksAvailableInSnapshots) + // If this happens, we need to update our static tables if slot > latestProcessedState || slot > r.validatorTable.Slot() { log.Warn("slot is ahead of the latest processed state", "slot", slot, "latestProcessedState", latestProcessedState, "validatorTableSlot", r.validatorTable.Slot()) @@ -100,7 +103,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. blockHeader := block.SignedBeaconBlockHeader().Header blockHeader.Root = common.Hash{} // Read the epoch and per-slot data. - slotData, err := state_accessors.ReadSlotData(tx, slot) + slotData, err := state_accessors.ReadSlotData(kvGetter, slot) if err != nil { return nil, err } @@ -110,7 +113,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. } roundedSlot := r.cfg.RoundSlotToEpoch(slot) - epochData, err := state_accessors.ReadEpochData(tx, roundedSlot) + epochData, err := state_accessors.ReadEpochData(kvGetter, roundedSlot) if err != nil { return nil, fmt.Errorf("failed to read epoch data: %w", err) } @@ -129,12 +132,12 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
stateRoots, blockRoots := solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)), solid.NewHashVector(int(r.cfg.SlotsPerHistoricalRoot)) ret.SetLatestBlockHeader(blockHeader) - if err := r.readHistoryHashVector(tx, r.genesisState.BlockRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.BlockRoot, blockRoots); err != nil { + if err := r.readHistoryHashVector(tx, kvGetter, r.genesisState.BlockRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.BlockRoot, blockRoots); err != nil { return nil, fmt.Errorf("failed to read block roots: %w", err) } ret.SetBlockRoots(blockRoots) - if err := r.readHistoryHashVector(tx, r.genesisState.StateRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.StateRoot, stateRoots); err != nil { + if err := r.readHistoryHashVector(tx, kvGetter, r.genesisState.StateRoots(), slot, r.cfg.SlotsPerHistoricalRoot, kv.StateRoot, stateRoots); err != nil { return nil, fmt.Errorf("failed to read state roots: %w", err) } ret.SetStateRoots(stateRoots) @@ -150,14 +153,14 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. // Eth1 eth1DataVotes := solid.NewStaticListSSZ[*cltypes.Eth1Data](int(r.cfg.Eth1DataVotesLength()), 72) - if err := r.readEth1DataVotes(tx, slotData.Eth1DataLength, slot, eth1DataVotes); err != nil { + if err := r.readEth1DataVotes(kvGetter, slotData.Eth1DataLength, slot, eth1DataVotes); err != nil { return nil, fmt.Errorf("failed to read eth1 data votes: %w", err) } ret.SetEth1DataVotes(eth1DataVotes) ret.SetEth1Data(slotData.Eth1Data) ret.SetEth1DepositIndex(slotData.Eth1DepositIndex) // Registry (Validators + Balances) - balancesBytes, err := r.reconstructBalances(tx, slotData.ValidatorLength, slot, kv.ValidatorBalance, kv.BalancesDump) + balancesBytes, err := r.reconstructBalances(tx, kvGetter, slotData.ValidatorLength, slot, kv.ValidatorBalance, kv.BalancesDump) if err != nil { return nil, fmt.Errorf("failed to read validator balances: %w", err) } @@ -168,27 +171,27 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. ret.SetBalances(balances) - validatorSet, err := r.ReadValidatorsForHistoricalState(tx, slot) + validatorSet, err := r.ReadValidatorsForHistoricalState(tx, kvGetter, slot) if err != nil { return nil, fmt.Errorf("failed to read validators: %w", err) } ret.SetValidators(validatorSet) // Randomness randaoMixes := solid.NewHashVector(int(r.cfg.EpochsPerHistoricalVector)) - if err := r.readRandaoMixes(tx, slot, randaoMixes); err != nil { + if err := r.readRandaoMixes(tx, kvGetter, slot, randaoMixes); err != nil { return nil, fmt.Errorf("failed to read randao mixes: %w", err) } ret.SetRandaoMixes(randaoMixes) slashingsVector := solid.NewUint64VectorSSZ(int(r.cfg.EpochsPerSlashingsVector)) // Slashings - err = r.ReconstructUint64ListDump(tx, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector) + err = r.ReconstructUint64ListDump(kvGetter, slot, kv.ValidatorSlashings, int(r.cfg.EpochsPerSlashingsVector), slashingsVector) if err != nil { return nil, fmt.Errorf("failed to read slashings: %w", err) } ret.SetSlashings(slashingsVector) // Finality - currentCheckpoint, previousCheckpoint, finalizedCheckpoint, ok, err := state_accessors.ReadCheckpoints(tx, roundedSlot) + currentCheckpoint, previousCheckpoint, finalizedCheckpoint, ok, err := state_accessors.ReadCheckpoints(kvGetter, roundedSlot) if err != nil { return nil, fmt.Errorf("failed to read checkpoints: %w", err) } @@ -211,7 +214,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
ret.SetCurrentEpochAttestations(currentAtts) ret.SetPreviousEpochAttestations(previousAtts) } else { - currentIdxs, previousIdxs, err := r.ReadParticipations(tx, slot) + currentIdxs, previousIdxs, err := r.ReadParticipations(tx, kvGetter, slot) if err != nil { return nil, fmt.Errorf("failed to read participations: %w", err) } @@ -224,7 +227,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. } inactivityScores := solid.NewUint64ListSSZ(int(r.cfg.ValidatorRegistryLimit)) // Inactivity - err = r.ReconstructUint64ListDump(tx, slot, kv.InactivityScores, int(slotData.ValidatorLength), inactivityScores) + err = r.ReconstructUint64ListDump(kvGetter, slot, kv.InactivityScores, int(slotData.ValidatorLength), inactivityScores) if err != nil { return nil, fmt.Errorf("failed to read inactivity scores: %w", err) } @@ -232,7 +235,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. ret.SetInactivityScoresRaw(inactivityScores) // Sync syncCommitteeSlot := r.cfg.RoundSlotToSyncCommitteePeriod(slot) - currentSyncCommittee, err := state_accessors.ReadCurrentSyncCommittee(tx, syncCommitteeSlot) + currentSyncCommittee, err := state_accessors.ReadCurrentSyncCommittee(kvGetter, syncCommitteeSlot) if err != nil { return nil, fmt.Errorf("failed to read current sync committee: %w", err) } @@ -240,7 +243,7 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. currentSyncCommittee = r.genesisState.CurrentSyncCommittee() } - nextSyncCommittee, err := state_accessors.ReadNextSyncCommittee(tx, syncCommitteeSlot) + nextSyncCommittee, err := state_accessors.ReadNextSyncCommittee(kvGetter, syncCommitteeSlot) if err != nil { return nil, fmt.Errorf("failed to read next sync committee: %w", err) } @@ -277,30 +280,36 @@ func (r *HistoricalStatesReader) ReadHistoricalState(ctx context.Context, tx kv. 
return ret, nil } -func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, genesisVector solid.HashVectorSSZ, slot, size uint64, table string, out solid.HashVectorSSZ) (err error) { +func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, kvGetter state_accessors.GetValFn, genesisVector solid.HashVectorSSZ, slot, size uint64, table string, out solid.HashVectorSSZ) (err error) { var needFromGenesis, inserted uint64 if size > slot || slot-size <= r.genesisState.Slot() { needFromGenesis = size - (slot - r.genesisState.Slot()) } needFromDB := size - needFromGenesis - cursor, err := tx.Cursor(table) + highestAvailableSlot, err := r.highestSlotInSnapshotsAndDB(tx, table) if err != nil { return err } - defer cursor.Close() + var currKeySlot uint64 - for k, v, err := cursor.Seek(base_encoding.Encode64ToBytes4(slot - needFromDB)); err == nil && k != nil; k, v, err = cursor.Next() { + for i := slot - needFromDB; i <= highestAvailableSlot; i++ { + key := base_encoding.Encode64ToBytes4(i) + v, err := kvGetter(table, key) + if err != nil { + return err + } if len(v) != 32 { - return fmt.Errorf("invalid key %x", k) + return fmt.Errorf("invalid key %x", key) } - currKeySlot = base_encoding.Decode64FromBytes4(k) + currKeySlot = i out.Set(int(currKeySlot%size), common.BytesToHash(v)) inserted++ if inserted == needFromDB { break } } + for i := 0; i < int(needFromGenesis); i++ { currKeySlot++ out.Set(int(currKeySlot%size), genesisVector.Get(int(currKeySlot%size))) @@ -308,18 +317,8 @@ func (r *HistoricalStatesReader) readHistoryHashVector(tx kv.Tx, genesisVector s return nil } -func (r *HistoricalStatesReader) readEth1DataVotes(tx kv.Tx, eth1DataVotesLength, slot uint64, out *solid.ListSSZ[*cltypes.Eth1Data]) error { +func (r *HistoricalStatesReader) readEth1DataVotes(kvGetter state_accessors.GetValFn, eth1DataVotesLength, slot uint64, out *solid.ListSSZ[*cltypes.Eth1Data]) error { initialSlot := r.cfg.RoundSlotToVotePeriod(slot) - initialKey := base_encoding.Encode64ToBytes4(initialSlot) - cursor, err := tx.Cursor(kv.Eth1DataVotes) - if err != nil { - return err - } - defer cursor.Close() - k, v, err := cursor.Seek(initialKey) - if err != nil { - return err - } if initialSlot <= r.genesisState.Slot() { // We need to prepend the genesis votes for i := 0; i < r.genesisState.Eth1DataVotes().Len(); i++ { @@ -329,24 +328,53 @@ endSlot := r.cfg.RoundSlotToVotePeriod(slot + r.cfg.SlotsPerEpoch*r.cfg.EpochsPerEth1VotingPeriod) - for k != nil && base_encoding.Decode64FromBytes4(k) < endSlot { + for i := initialSlot; i < endSlot; i++ { if out.Len() >= int(eth1DataVotesLength) { break } + key := base_encoding.Encode64ToBytes4(i) + v, err := kvGetter(kv.Eth1DataVotes, key) + if err != nil { + return err + } + if len(v) == 0 { + continue + } eth1Data := &cltypes.Eth1Data{} if err := eth1Data.DecodeSSZ(v, 0); err != nil { return err } out.Append(eth1Data) - k, v, err = cursor.Next() - if err != nil { - return err - } } + return nil } -func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, slot uint64, out solid.HashVectorSSZ) error { +func (r *HistoricalStatesReader) highestSlotInSnapshotsAndDB(tx kv.Tx, tbl string) (uint64, error) { + cursor, err := tx.Cursor(tbl) + if err != nil { + return 0, err + } + defer cursor.Close() + k, _, err := cursor.Last() + if err != nil { + return 0, err + } + if k == nil { + if r.stateSn != nil { + return r.stateSn.BlocksAvailable(), nil + } + return 0, nil + } + availableInDB := base_encoding.Decode64FromBytes4(k) + var availableInSnapshots uint64 + if r.stateSn != nil { + availableInSnapshots = r.stateSn.BlocksAvailable() + } + return max(availableInDB, availableInSnapshots), nil +} + +func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, kvGetter state_accessors.GetValFn, slot uint64, out solid.HashVectorSSZ) error { size := r.cfg.EpochsPerHistoricalVector genesisVector := r.genesisState.RandaoMixes() var needFromGenesis, inserted uint64 @@ -358,17 +386,26 @@ } needFromDB := size - needFromGenesis - cursor, err := tx.Cursor(kv.RandaoMixes) + + highestAvailableSlot, err := r.highestSlotInSnapshotsAndDB(tx, kv.RandaoMixes) if err != nil { return err } - defer cursor.Close() var currKeyEpoch uint64 - for k, v, err := cursor.Seek(base_encoding.Encode64ToBytes4(roundedSlot - (needFromDB)*r.cfg.SlotsPerEpoch)); err == nil && k != nil; k, v, err = cursor.Next() { + + for i := roundedSlot - (needFromDB)*r.cfg.SlotsPerEpoch; i <= highestAvailableSlot; i++ { + key := base_encoding.Encode64ToBytes4(i) + v, err := kvGetter(kv.RandaoMixes, key) + if err != nil { + return err + } + if len(v) == 0 { + continue + } if len(v) != 32 { - return fmt.Errorf("invalid key %x", k) + return fmt.Errorf("invalid key %x", key) } - currKeyEpoch = base_encoding.Decode64FromBytes4(k) / r.cfg.SlotsPerEpoch + currKeyEpoch = i / r.cfg.SlotsPerEpoch out.Set(int(currKeyEpoch%size), common.BytesToHash(v)) inserted++ if inserted == needFromDB { @@ -379,8 +416,9 @@ currKeyEpoch++ out.Set(int(currKeyEpoch%size), genesisVector.Get(int(currKeyEpoch%size))) } + // Now we need to read the intra epoch randao mix. 
- intraRandaoMix, err := tx.GetOne(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) + intraRandaoMix, err := kvGetter(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) if err != nil { return err } @@ -391,7 +429,7 @@ func (r *HistoricalStatesReader) readRandaoMixes(tx kv.Tx, slot uint64, out soli return nil } -func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, validatorSetLength, slot uint64, diffBucket string, dumpBucket string) ([]byte, error) { +func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, kvGetter state_accessors.GetValFn, validatorSetLength, slot uint64, diffBucket string, dumpBucket string) ([]byte, error) { // Read the file remainder := slot % clparams.SlotsPerDump freshDumpSlot := slot - remainder @@ -404,12 +442,12 @@ } forward := remainder <= midpoint || currentStageProgress <= freshDumpSlot+clparams.SlotsPerDump if forward { - compressed, err = tx.GetOne(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot)) + compressed, err = kvGetter(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot)) if err != nil { return nil, err } } else { - compressed, err = tx.GetOne(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot+clparams.SlotsPerDump)) + compressed, err = kvGetter(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot+clparams.SlotsPerDump)) if err != nil { return nil, err } @@ -438,43 +476,44 @@ return nil, err } - diffCursor, err := tx.Cursor(diffBucket) + highestSlotAvailable, err := r.highestSlotInSnapshotsAndDB(tx, diffBucket) if err != nil { return nil, err } - defer diffCursor.Close() if forward { - for k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(freshDumpSlot)); err == nil && k != nil && base_encoding.Decode64FromBytes4(k) <= slot; k, v, err = diffCursor.Next() { + for currSlot := freshDumpSlot; currSlot <= slot && currSlot <= highestSlotAvailable; currSlot++ { + key := base_encoding.Encode64ToBytes4(currSlot) + v, err := kvGetter(diffBucket, key) if err != nil { return nil, err } - if len(k) != 4 { - return nil, fmt.Errorf("invalid key %x", k) + if len(v) == 0 { + continue + } + if len(key) != 4 { + return nil, fmt.Errorf("invalid key %x", key) } - currSlot := base_encoding.Decode64FromBytes4(k) if currSlot == freshDumpSlot { continue } - if currSlot > slot { - return nil, fmt.Errorf("diff not found for slot %d", slot) - } currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, v, false) if err != nil { return nil, err } } } else { - for k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(freshDumpSlot + clparams.SlotsPerDump)); err == nil && k != nil && base_encoding.Decode64FromBytes4(k) > slot; k, v, err = diffCursor.Prev() { + for currSlot := freshDumpSlot + clparams.SlotsPerDump; currSlot > slot && currSlot > r.genesisState.Slot(); currSlot-- { + key := base_encoding.Encode64ToBytes4(currSlot) + v, err := kvGetter(diffBucket, key) if err != nil { return nil, err } - if len(k) != 4 { - return nil, fmt.Errorf("invalid key %x", k) - } - currSlot := base_encoding.Decode64FromBytes4(k) - if currSlot <= slot || currSlot > freshDumpSlot+clparams.SlotsPerDump { + if len(v) == 0 { continue } + if len(key) != 4 { + return nil, fmt.Errorf("invalid key %x", key) + } currentList, err = base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, v, true) 
if err != nil { return nil, err @@ -485,7 +524,7 @@ func (r *HistoricalStatesReader) reconstructDiffedUint64List(tx kv.Tx, validator return currentList, err } -func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLength, slot uint64, diffBucket, dumpBucket string) ([]byte, error) { +func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, kvGetter state_accessors.GetValFn, validatorSetLength, slot uint64, diffBucket, dumpBucket string) ([]byte, error) { remainder := slot % clparams.SlotsPerDump freshDumpSlot := slot - remainder @@ -501,12 +540,12 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt midpoint := uint64(clparams.SlotsPerDump / 2) forward := remainder <= midpoint || currentStageProgress <= freshDumpSlot+clparams.SlotsPerDump if forward { - compressed, err = tx.GetOne(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot)) + compressed, err = kvGetter(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot)) if err != nil { return nil, err } } else { - compressed, err = tx.GetOne(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot+clparams.SlotsPerDump)) + compressed, err = kvGetter(dumpBucket, base_encoding.Encode64ToBytes4(freshDumpSlot+clparams.SlotsPerDump)) if err != nil { return nil, err } @@ -535,7 +574,7 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt if i == freshDumpSlot { continue } - diff, err := tx.GetOne(diffBucket, base_encoding.Encode64ToBytes4(i)) + diff, err := kvGetter(diffBucket, base_encoding.Encode64ToBytes4(i)) if err != nil { return nil, err } @@ -549,7 +588,7 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt } } else { for i := freshDumpSlot + clparams.SlotsPerDump; i > roundedSlot; i -= r.cfg.SlotsPerEpoch { - diff, err := tx.GetOne(diffBucket, base_encoding.Encode64ToBytes4(i)) + diff, err := kvGetter(diffBucket, base_encoding.Encode64ToBytes4(i)) if err != nil { return nil, err } @@ -563,17 +602,12 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt } } - diffCursor, err := tx.Cursor(diffBucket) - if err != nil { - return nil, err - } - defer diffCursor.Close() if slot%r.cfg.SlotsPerEpoch == 0 { currentList = currentList[:validatorSetLength*8] return currentList, nil } - slotDiff, err := tx.GetOne(diffBucket, base_encoding.Encode64ToBytes4(slot)) + slotDiff, err := kvGetter(diffBucket, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -585,27 +619,24 @@ func (r *HistoricalStatesReader) reconstructBalances(tx kv.Tx, validatorSetLengt return base_encoding.ApplyCompressedSerializedUint64ListDiff(currentList, currentList, slotDiff, false) } -func (r *HistoricalStatesReader) ReconstructUint64ListDump(tx kv.Tx, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error { - diffCursor, err := tx.Cursor(bkt) - if err != nil { - return err - } - defer diffCursor.Close() - - k, v, err := diffCursor.Seek(base_encoding.Encode64ToBytes4(slot)) - if err != nil { - return err - } - if k == nil { - return fmt.Errorf("diff not found for slot %d", slot) - } - keySlot := base_encoding.Decode64FromBytes4(k) - if keySlot > slot { - _, v, err = diffCursor.Prev() +func (r *HistoricalStatesReader) ReconstructUint64ListDump(kvGetter state_accessors.GetValFn, slot uint64, bkt string, size int, out solid.Uint64ListSSZ) error { + var ( + v []byte + err error + ) + // Try seeking <= to slot + for i := slot; i >= r.genesisState.Slot(); i-- { + key := 
base_encoding.Encode64ToBytes4(i) + v, err = kvGetter(bkt, key) if err != nil { return err } + if len(v) == 0 { + continue + } + break } + var b bytes.Buffer if _, err := b.Write(v); err != nil { return err @@ -625,9 +656,9 @@ return out.DecodeSSZ(currentList, 0) } -func (r *HistoricalStatesReader) ReadValidatorsForHistoricalState(tx kv.Tx, slot uint64) (*solid.ValidatorSet, error) { +func (r *HistoricalStatesReader) ReadValidatorsForHistoricalState(tx kv.Tx, kvGetter state_accessors.GetValFn, slot uint64) (*solid.ValidatorSet, error) { // Read the minimal beacon state which has the small fields. - sd, err := state_accessors.ReadSlotData(tx, slot) + sd, err := state_accessors.ReadSlotData(kvGetter, slot) if err != nil { return nil, err } @@ -648,7 +679,7 @@ }) // Read the balances - bytesEffectiveBalances, err := r.reconstructDiffedUint64List(tx, validatorSetLength, slot, kv.ValidatorEffectiveBalance, kv.EffectiveBalancesDump) + bytesEffectiveBalances, err := r.reconstructDiffedUint64List(tx, kvGetter, validatorSetLength, slot, kv.ValidatorEffectiveBalance, kv.EffectiveBalancesDump) if err != nil { return nil, err } @@ -711,12 +742,12 @@ func (r *HistoricalStatesReader) readPendingEpochs(tx kv.Tx, slot uint64) (*soli } // ReadParticipations shuffles active indices and returns the participation flags for the given epoch. -func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, slot uint64) (*solid.ParticipationBitList, *solid.ParticipationBitList, error) { +func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, kvGetter state_accessors.GetValFn, slot uint64) (*solid.ParticipationBitList, *solid.ParticipationBitList, error) { var beginSlot uint64 epoch, prevEpoch := r.computeRelevantEpochs(slot) beginSlot = prevEpoch * r.cfg.SlotsPerEpoch - currentActiveIndicies, err := state_accessors.ReadActiveIndicies(tx, epoch*r.cfg.SlotsPerEpoch) + currentActiveIndicies, err := state_accessors.ReadActiveIndicies(kvGetter, epoch*r.cfg.SlotsPerEpoch) if err != nil { return nil, nil, err } @@ -724,14 +755,14 @@ if epoch == 0 { previousActiveIndicies = currentActiveIndicies } else { - previousActiveIndicies, err = state_accessors.ReadActiveIndicies(tx, (epoch-1)*r.cfg.SlotsPerEpoch) + previousActiveIndicies, err = state_accessors.ReadActiveIndicies(kvGetter, (epoch-1)*r.cfg.SlotsPerEpoch) if err != nil { return nil, nil, err } }
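// Editor's note (worked example, not part of this PR): computeRelevantEpochs returns the slot's epoch and the epoch before it; with 32 slots per epoch, slot 1000 sits in epoch 31, so prevEpoch = 30 and beginSlot = 30*32 = 960, and the loop below replays blocks from beginSlot up to the target slot.
// Read the minimal beacon state which has the small fields.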
- sd, err := state_accessors.ReadSlotData(tx, slot) + sd, err := state_accessors.ReadSlotData(kvGetter, slot) if err != nil { return nil, nil, err } @@ -746,10 +777,7 @@ func (r *HistoricalStatesReader) ReadParticipations(tx kv.Tx, slot uint64) (*sol if err != nil { return nil, nil, err } - // trigger the cache for shuffled sets in parallel - if err := r.tryCachingEpochsInParallell(tx, [][]uint64{currentActiveIndicies, previousActiveIndicies}, []uint64{epoch, prevEpoch}); err != nil { - return nil, nil, err - } + // Replay blocks from the previous epoch up to the target slot to rebuild participation for i := beginSlot; i <= slot; i++ { // Read the block @@ -784,7 +812,7 @@ attestationEpoch := data.Slot / r.cfg.SlotsPerEpoch mixPosition := (attestationEpoch + r.cfg.EpochsPerHistoricalVector - r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector - mix, err := r.ReadRandaoMixBySlotAndIndex(tx, data.Slot, mixPosition) + mix, err := r.ReadRandaoMixBySlotAndIndex(tx, kvGetter, data.Slot, mixPosition) if err != nil { return false } @@ -795,7 +823,7 @@ return false } var participationFlagsIndicies []uint8 - participationFlagsIndicies, err = r.getAttestationParticipationFlagIndicies(tx, block.Version(), i, *data, i-data.Slot, true) + participationFlagsIndicies, err = r.getAttestationParticipationFlagIndicies(tx, kvGetter, block.Version(), i, *data, i-data.Slot, true) if err != nil { return false } @@ -836,12 +864,12 @@ return epoch, epoch - 1 } -func (r *HistoricalStatesReader) tryCachingEpochsInParallell(tx kv.Tx, activeIdxs [][]uint64, epochs []uint64) error { +func (r *HistoricalStatesReader) tryCachingEpochsInParallell(tx kv.Tx, kvGetter state_accessors.GetValFn, activeIdxs [][]uint64, epochs []uint64) error { var wg sync.WaitGroup wg.Add(len(epochs)) for i, epoch := range epochs { mixPosition := (epoch + r.cfg.EpochsPerHistoricalVector - r.cfg.MinSeedLookahead - 1) % r.cfg.EpochsPerHistoricalVector - mix, err := r.ReadRandaoMixBySlotAndIndex(tx, epochs[0]*r.cfg.SlotsPerEpoch, mixPosition) + mix, err := r.ReadRandaoMixBySlotAndIndex(tx, kvGetter, epochs[0]*r.cfg.SlotsPerEpoch, mixPosition) if err != nil { return err } @@ -856,8 +884,8 @@ return nil } -func (r *HistoricalStatesReader) ReadValidatorsBalances(tx kv.Tx, slot uint64) (solid.Uint64ListSSZ, error) { - sd, err := state_accessors.ReadSlotData(tx, slot) +func (r *HistoricalStatesReader) ReadValidatorsBalances(tx kv.Tx, kvGetter state_accessors.GetValFn, slot uint64) (solid.Uint64ListSSZ, error) { + sd, err := state_accessors.ReadSlotData(kvGetter, slot) if err != nil { return nil, err } @@ -866,7 +894,7 @@ return nil, nil } - balances, err := r.reconstructBalances(tx, sd.ValidatorLength, slot, kv.ValidatorBalance, kv.BalancesDump) + balances, err := r.reconstructBalances(tx, kvGetter, sd.ValidatorLength, slot, kv.ValidatorBalance, kv.BalancesDump) if err != nil { return nil, err } @@ -875,11 +903,11 @@ return balancesList, balancesList.DecodeSSZ(balances, 0) }
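// Editor's sketch (hypothetical helper, not part of this PR): balances are persisted as a full dump every clparams.SlotsPerDump slots with per-epoch diffs in between; reconstructBalances picks the cheaper replay direction roughly like this (the real code additionally considers stage progress):
func nearestDump(slot uint64) (dumpSlot uint64, forward bool) {
	remainder := slot % clparams.SlotsPerDump
	if remainder <= uint64(clparams.SlotsPerDump/2) {
		return slot - remainder, true // roll diffs forward from the previous dump
	}
	return slot - remainder + clparams.SlotsPerDump, false // unwind diffs backward from the next dump
}
-func (r *HistoricalStatesReader) ReadRandaoMixBySlotAndIndex(tx kv.Tx, slot, index uint64) (common.Hash, error) { +func (r *HistoricalStatesReader)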
ReadRandaoMixBySlotAndIndex(tx kv.Tx, kvGetter state_accessors.GetValFn, slot, index uint64) (common.Hash, error) { epoch := slot / r.cfg.SlotsPerEpoch epochSubIndex := epoch % r.cfg.EpochsPerHistoricalVector if index == epochSubIndex { - intraRandaoMix, err := tx.GetOne(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) + intraRandaoMix, err := kvGetter(kv.IntraRandaoMixes, base_encoding.Encode64ToBytes4(slot)) if err != nil { return common.Hash{}, err } @@ -908,7 +936,7 @@ func (r *HistoricalStatesReader) ReadRandaoMixBySlotAndIndex(tx kv.Tx, slot, ind if needFromGenesis { return r.genesisState.GetRandaoMixes(epoch), nil } - mixBytes, err := tx.GetOne(kv.RandaoMixes, base_encoding.Encode64ToBytes4(epochLookup*r.cfg.SlotsPerEpoch)) + mixBytes, err := kvGetter(kv.RandaoMixes, base_encoding.Encode64ToBytes4(epochLookup*r.cfg.SlotsPerEpoch)) if err != nil { return common.Hash{}, err } diff --git a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go index 38151494504..290239143bd 100644 --- a/cl/persistence/state/historical_states_reader/historical_states_reader_test.go +++ b/cl/persistence/state/historical_states_reader/historical_states_reader_test.go @@ -41,7 +41,7 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() - a := antiquary.NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, log.New(), true, true, true, false, nil) + a := antiquary.NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, nil, reader, log.New(), true, true, true, false, nil) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) // Now lets test it against the reader tx, err := db.BeginRw(ctx) @@ -50,7 +50,7 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt vt = state_accessors.NewStaticValidatorTable() require.NoError(t, state_accessors.ReadValidatorsTable(tx, vt)) - hr := historical_states_reader.NewHistoricalStatesReader(&clparams.MainnetBeaconConfig, reader, vt, preState) + hr := historical_states_reader.NewHistoricalStatesReader(&clparams.MainnetBeaconConfig, reader, vt, preState, nil) s, err := hr.ReadHistoricalState(ctx, tx, blocks[len(blocks)-1].Block.Slot) require.NoError(t, err) diff --git a/cl/persistence/state/state_accessors.go b/cl/persistence/state/state_accessors.go index 6e9fdf7b6e6..6d4ca4d6c10 100644 --- a/cl/persistence/state/state_accessors.go +++ b/cl/persistence/state/state_accessors.go @@ -18,16 +18,33 @@ package state_accessors import ( "bytes" + "encoding/binary" "github.com/erigontech/erigon-lib/kv" "github.com/erigontech/erigon/cl/cltypes" "github.com/erigontech/erigon/cl/cltypes/solid" "github.com/erigontech/erigon/cl/persistence/base_encoding" "github.com/erigontech/erigon/cl/phase1/core/state" + "github.com/erigontech/erigon/turbo/snapshotsync/freezeblocks" libcommon "github.com/erigontech/erigon-lib/common" ) +type GetValFn func(table string, key []byte) ([]byte, error) + +func GetValFnTxAndSnapshot(tx kv.Tx, snapshotRoTx *freezeblocks.CaplinStateView) GetValFn { + return func(table string, key []byte) ([]byte, error) { + if snapshotRoTx != nil { + slot := uint64(binary.BigEndian.Uint32(key)) + segment, ok := snapshotRoTx.VisibleSegment(slot, table) + if ok { + return segment.Get(slot) + } + } + 
return tx.GetOne(table, key) + } +} + // InitializeStaticTables initializes the static validator tables in the database. func InitializeStaticTables(tx kv.RwTx, state *state.CachingBeaconState) error { var err error @@ -164,9 +181,9 @@ func SetStateProcessingProgress(tx kv.RwTx, progress uint64) error { return tx.Put(kv.StatesProcessingProgress, kv.StatesProcessingKey, base_encoding.Encode64ToBytes4(progress)) } -func ReadSlotData(tx kv.Tx, slot uint64) (*SlotData, error) { +func ReadSlotData(getFn GetValFn, slot uint64) (*SlotData, error) { sd := &SlotData{} - v, err := tx.GetOne(kv.SlotData, base_encoding.Encode64ToBytes4(slot)) + v, err := getFn(kv.SlotData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -178,9 +195,9 @@ return sd, sd.ReadFrom(buf) } -func ReadEpochData(tx kv.Tx, slot uint64) (*EpochData, error) { +func ReadEpochData(getFn GetValFn, slot uint64) (*EpochData, error) { ed := &EpochData{} - v, err := tx.GetOne(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) + v, err := getFn(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -193,10 +210,10 @@ } // ReadCheckpoints reads the checkpoints from the database, Current, Previous and Finalized -func ReadCheckpoints(tx kv.Tx, slot uint64) (current solid.Checkpoint, previous solid.Checkpoint, finalized solid.Checkpoint, ok bool, err error) { +func ReadCheckpoints(getFn GetValFn, slot uint64) (current solid.Checkpoint, previous solid.Checkpoint, finalized solid.Checkpoint, ok bool, err error) { ed := &EpochData{} var v []byte - v, err = tx.GetOne(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) + v, err = getFn(kv.EpochData, base_encoding.Encode64ToBytes4(slot)) if err != nil { return } @@ -212,8 +229,8 @@ } // ReadNextSyncCommittee reads the next sync committee for the given slot -func ReadNextSyncCommittee(tx kv.Tx, slot uint64) (committee *solid.SyncCommittee, err error) { - v, err := tx.GetOne(kv.NextSyncCommittee, base_encoding.Encode64ToBytes4(slot)) +func ReadNextSyncCommittee(getFn GetValFn, slot uint64) (committee *solid.SyncCommittee, err error) { + v, err := getFn(kv.NextSyncCommittee, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -226,8 +243,8 @@ } // ReadCurrentSyncCommittee reads the current sync committee for the given slot -func ReadCurrentSyncCommittee(tx kv.Tx, slot uint64) (committee *solid.SyncCommittee, err error) { - v, err := tx.GetOne(kv.CurrentSyncCommittee, base_encoding.Encode64ToBytes4(slot)) +func ReadCurrentSyncCommittee(getFn GetValFn, slot uint64) (committee *solid.SyncCommittee, err error) { + v, err := getFn(kv.CurrentSyncCommittee, base_encoding.Encode64ToBytes4(slot)) if err != nil { return nil, err } @@ -301,9 +318,9 @@ return err } -func ReadActiveIndicies(tx kv.Tx, slot uint64) ([]uint64, error) { +func ReadActiveIndicies(getFn GetValFn, slot uint64) ([]uint64, error) { key := base_encoding.Encode64ToBytes4(slot) - v, err := tx.GetOne(kv.ActiveValidatorIndicies, key) + v, err := getFn(kv.ActiveValidatorIndicies, key) if err != nil { return nil, err }
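A minimal usage sketch of the GetValFn plumbing above (editor's illustration, not part of the diff; variable names are hypothetical): callers open a read-only snapshot view alongside the database transaction and hand the combined getter to the accessors, which resolve snapshot segments first and fall back to the database.

	stateSnRoTx := stateSn.View() // *freezeblocks.CaplinStateView
	defer stateSnRoTx.Close()
	getFn := state_accessors.GetValFnTxAndSnapshot(tx, stateSnRoTx)
	sd, err := state_accessors.ReadSlotData(getFn, slot) // snapshot first, then DB
	if err != nil {
		return err
	}
	_ = sd

diff --git a/cl/persistence/state/validator_events.go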
b/cl/persistence/state/validator_events.go index 3c5747c8166..b6c8e83d9ed 100644 --- a/cl/persistence/state/validator_events.go +++ b/cl/persistence/state/validator_events.go @@ -19,6 +19,7 @@ package state_accessors import ( "encoding/binary" "errors" + "sync" libcommon "github.com/erigontech/erigon-lib/common" "github.com/erigontech/erigon/cl/cltypes/solid" @@ -42,49 +43,68 @@ const ( type StateEvents struct { buf []byte + mu sync.Mutex } func NewStateEvents() *StateEvents { return &StateEvents{} } +func NewStateEventsFromBytes(buf []byte) *StateEvents { + return &StateEvents{buf: libcommon.Copy(buf)} +} + func (se *StateEvents) AddValidator(validatorIndex uint64, validator solid.Validator) { + se.mu.Lock() + defer se.mu.Unlock() se.buf = append(se.buf, byte(addValidator)) se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex) se.buf = append(se.buf, validator...) } func (se *StateEvents) ChangeExitEpoch(validatorIndex uint64, exitEpoch uint64) { + se.mu.Lock() + defer se.mu.Unlock() se.buf = append(se.buf, byte(changeExitEpoch)) se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex) se.buf = binary.BigEndian.AppendUint64(se.buf, exitEpoch) } func (se *StateEvents) ChangeWithdrawableEpoch(validatorIndex uint64, withdrawableEpoch uint64) { + se.mu.Lock() + defer se.mu.Unlock() se.buf = append(se.buf, byte(changeWithdrawableEpoch)) se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex) se.buf = binary.BigEndian.AppendUint64(se.buf, withdrawableEpoch) } func (se *StateEvents) ChangeWithdrawalCredentials(validatorIndex uint64, withdrawalCredentials libcommon.Hash) { + se.mu.Lock() + defer se.mu.Unlock() se.buf = append(se.buf, byte(changeWithdrawalCredentials)) se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex) se.buf = append(se.buf, withdrawalCredentials[:]...) 
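// Editor's note (sketch inferred from the writers in this file, not part of this PR): every event is appended as one tag byte, an 8-byte big-endian validator index, then a tag-specific payload (an 8-byte epoch, a 32-byte hash, or a 1-byte flag). A decoder would start roughly as:
//
//	tag := buf[0]
//	validatorIndex := binary.BigEndian.Uint64(buf[1:9])
//	payload := buf[9:]
//
// The mutex added in this file makes these appends, CopyBytes and Reset safe under concurrent use.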
} func (se *StateEvents) ChangeActivationEpoch(validatorIndex uint64, activationEpoch uint64) { + se.mu.Lock() + defer se.mu.Unlock() se.buf = append(se.buf, byte(changeActivationEpoch)) se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex) se.buf = binary.BigEndian.AppendUint64(se.buf, activationEpoch) } func (se *StateEvents) ChangeActivationEligibilityEpoch(validatorIndex uint64, activationEligibilityEpoch uint64) { + se.mu.Lock() + defer se.mu.Unlock() se.buf = append(se.buf, byte(changeActivationEligibilityEpoch)) se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex) se.buf = binary.BigEndian.AppendUint64(se.buf, activationEligibilityEpoch) } func (se *StateEvents) ChangeSlashed(validatorIndex uint64, slashed bool) { + se.mu.Lock() + defer se.mu.Unlock() se.buf = append(se.buf, byte(changeSlashed)) se.buf = binary.BigEndian.AppendUint64(se.buf, validatorIndex) se.buf = append(se.buf, byte(0)) @@ -94,10 +114,14 @@ func (se *StateEvents) ChangeSlashed(validatorIndex uint64, slashed bool) { } func (se *StateEvents) CopyBytes() []byte { + se.mu.Lock() + defer se.mu.Unlock() return libcommon.Copy(se.buf) } func (se *StateEvents) Reset() { + se.mu.Lock() + defer se.mu.Unlock() se.buf = se.buf[:0] } diff --git a/cl/sentinel/sentinel_requests_test.go b/cl/sentinel/sentinel_requests_test.go index cfd0fa65cd4..3fe6d03050d 100644 --- a/cl/sentinel/sentinel_requests_test.go +++ b/cl/sentinel/sentinel_requests_test.go @@ -55,7 +55,7 @@ func loadChain(t *testing.T) (db kv.RwDB, blocks []*cltypes.SignedBeaconBlock, f ctx := context.Background() vt := state_accessors.NewStaticValidatorTable() - a := antiquary.NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, reader, log.New(), true, true, false, false, nil) + a := antiquary.NewAntiquary(ctx, nil, preState, vt, &clparams.MainnetBeaconConfig, datadir.New("/tmp"), nil, db, nil, nil, reader, log.New(), true, true, false, false, nil) require.NoError(t, a.IncrementBeaconState(ctx, blocks[len(blocks)-1].Block.Slot+33)) return } diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go index 86e1e809841..948503f3253 100644 --- a/cmd/capcli/cli.go +++ b/cmd/capcli/cli.go @@ -27,6 +27,7 @@ import ( "net/http" "net/url" "os" + "runtime" "strconv" "strings" "time" @@ -80,6 +81,7 @@ var CLI struct { CheckBlobsSnapshots CheckBlobsSnapshots `cmd:"" help:"check blobs snapshots"` CheckBlobsSnapshotsCount CheckBlobsSnapshotsCount `cmd:"" help:"check blobs snapshots count"` DumpBlobsSnapshotsToStore DumpBlobsSnapshotsToStore `cmd:"" help:"dump blobs snapshots to store"` + DumpStateSnapshots DumpStateSnapshots `cmd:"" help:"dump state snapshots"` } type chainCfg struct { @@ -178,7 +180,7 @@ func (c *Chain) Run(ctx *Context) error { } downloader := network.NewBackwardBeaconDownloader(ctx, beacon, nil, nil, db) - cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, nil, dirs, nil, nil, nil, nil, nil, false, false, false, false, nil), csn, db, nil, beaconConfig, true, false, true, bRoot, bs.Slot(), "/tmp", 300*time.Millisecond, nil, nil, blobStorage, log.Root()) + cfg := stages.StageHistoryReconstruction(downloader, antiquary.NewAntiquary(ctx, nil, nil, nil, nil, dirs, nil, nil, nil, nil, nil, nil, false, false, false, false, nil), csn, db, nil, beaconConfig, true, false, true, bRoot, bs.Slot(), "/tmp", 300*time.Millisecond, nil, nil, blobStorage, log.Root()) return stages.SpawnStageHistoryDownload(cfg, ctx, log.Root()) } @@ -534,6 +536,7 @@ func (c *LoopSnapshots) 
Run(ctx *Context) error { type RetrieveHistoricalState struct { chainCfg outputFolder + withPPROF CompareFile string `help:"compare file" default:""` CompareSlot uint64 `help:"compare slot" default:"0"` Out string `help:"output file" default:""` @@ -580,7 +583,17 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { return err } - hr := historical_states_reader.NewHistoricalStatesReader(beaconConfig, snr, vt, gSpot) + snTypes := freezeblocks.MakeCaplinStateSnapshotsTypes(db) + stateSn := freezeblocks.NewCaplinStateSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, snTypes, log.Root()) + if err := stateSn.OpenFolder(); err != nil { + return err + } + if _, err := antiquary.FillStaticValidatorsTableIfNeeded(ctx, log.Root(), stateSn, vt); err != nil { + return err + } + fmt.Println(vt.WithdrawableEpoch(0, 1)) + r.withPPROF.withProfile() + hr := historical_states_reader.NewHistoricalStatesReader(beaconConfig, snr, vt, gSpot, stateSn) start := time.Now() haveState, err := hr.ReadHistoricalState(ctx, tx, r.CompareSlot) if err != nil { @@ -636,11 +649,11 @@ func (r *RetrieveHistoricalState) Run(ctx *Context) error { return err } if hRoot != wRoot { - // for i := 0; i < haveState.PreviousEpochParticipation().Length(); i++ { - // if haveState.PreviousEpochParticipation().Get(i) != wantState.PreviousEpochParticipation().Get(i) { - // log.Info("Participation mismatch", "index", i, "have", haveState.PreviousEpochParticipation().Get(i), "want", wantState.PreviousEpochParticipation().Get(i)) - // } - // } + for i := 0; i < haveState.BlockRoots().Length(); i++ { + if haveState.BlockRoots().Get(i) != wantState.BlockRoots().Get(i) { + log.Info("block roots mismatch", "index", i, "have", haveState.BlockRoots().Get(i), "want", wantState.BlockRoots().Get(i)) + } + } return fmt.Errorf("state mismatch: got %s, want %s", libcommon.Hash(hRoot), libcommon.Hash(wRoot)) } return nil @@ -1172,3 +1185,60 @@ func (c *DumpBlobsSnapshotsToStore) Run(ctx *Context) error { return nil } + +type DumpStateSnapshots struct { + chainCfg + outputFolder + To uint64 `name:"to" help:"slot to dump"` + StepSize uint64 `name:"step-size" help:"step size" default:"10000"` +} + +func (c *DumpStateSnapshots) Run(ctx *Context) error { + _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(c.Chain) + if err != nil { + return err + } + log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler)) + log.Info("Started state snapshots dump", "chain", c.Chain) + + dirs := datadir.New(c.Datadir) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) + + db, _, err := caplin1.OpenCaplinDatabase(ctx, beaconConfig, nil, dirs.CaplinIndexing, dirs.CaplinBlobs, nil, false, 0) + if err != nil { + return err + } + var to uint64 + db.View(ctx, func(tx kv.Tx) (err error) { + if c.To == 0 { + to, err = state_accessors.GetStateProcessingProgress(tx) + return + } + to = c.To + return + }) + + salt, err := snaptype.GetIndexSalt(dirs.Snap) + + if err != nil { + return err + } + snTypes := freezeblocks.MakeCaplinStateSnapshotsTypes(db) + stateSn := freezeblocks.NewCaplinStateSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, snTypes, log.Root()) + if err := stateSn.OpenFolder(); err != nil { + return err + } + r, _ := stateSn.Get(kv.BlockRoot, 999424) + fmt.Printf("%x\n", r) + + if err := stateSn.DumpCaplinState(ctx, stateSn.BlocksAvailable(), to, c.StepSize, salt, dirs, runtime.NumCPU(), log.LvlInfo, log.Root()); err != nil { + return err + } + if err := stateSn.OpenFolder(); err !=
nil { + return err + } + r, _ = stateSn.Get(kv.BlockRoot, 999424) + fmt.Printf("%x\n", r) + + return nil +} diff --git a/cmd/caplin/caplin1/run.go b/cmd/caplin/caplin1/run.go index fa3a2a62f66..46063885805 100644 --- a/cmd/caplin/caplin1/run.go +++ b/cmd/caplin/caplin1/run.go @@ -379,8 +379,11 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi return err } } - - antiq := antiquary.NewAntiquary(ctx, blobStorage, genesisState, vTables, beaconConfig, dirs, snDownloader, indexDB, csn, rcsn, logger, states, backfilling, blobBackfilling, config.SnapshotGenerationEnabled, snBuildSema) + stateSnapshots := freezeblocks.NewCaplinStateSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, freezeblocks.MakeCaplinStateSnapshotsTypes(indexDB), logger) + if err := stateSnapshots.OpenFolder(); err != nil { + return err + } + antiq := antiquary.NewAntiquary(ctx, blobStorage, genesisState, vTables, beaconConfig, dirs, snDownloader, indexDB, stateSnapshots, csn, rcsn, logger, states, backfilling, blobBackfilling, config.SnapshotGenerationEnabled, snBuildSema) // Create the antiquary go func() { if err := antiq.Loop(); err != nil { @@ -392,7 +395,7 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi return err } - statesReader := historical_states_reader.NewHistoricalStatesReader(beaconConfig, rcsn, vTables, genesisState) + statesReader := historical_states_reader.NewHistoricalStatesReader(beaconConfig, rcsn, vTables, genesisState, stateSnapshots) validatorParameters := validator_params.NewValidatorParams() if config.BeaconAPIRouter.Active { apiHandler := handler.NewApiHandler( @@ -427,6 +430,7 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi proposerSlashingService, option.builderClient, validatorMonitor, + stateSnapshots, ) go beacon.ListenAndServe(&beacon.LayeredBeaconHandler{ ArchiveApi: apiHandler, diff --git a/erigon-lib/common/datadir/dirs.go b/erigon-lib/common/datadir/dirs.go index 266625088f2..3a654eead38 100644 --- a/erigon-lib/common/datadir/dirs.go +++ b/erigon-lib/common/datadir/dirs.go @@ -42,6 +42,7 @@ type Dirs struct { SnapHistory string SnapDomain string SnapAccessors string + SnapCaplin string Downloader string TxPool string Nodes string @@ -79,6 +80,7 @@ func New(datadir string) Dirs { CaplinIndexing: filepath.Join(datadir, "caplin", "indexing"), CaplinLatest: filepath.Join(datadir, "caplin", "latest"), CaplinGenesis: filepath.Join(datadir, "caplin", "genesis"), + SnapCaplin: filepath.Join(datadir, "snapshots", "caplin"), } dir.MustExist(dirs.Chaindata, dirs.Tmp, diff --git a/erigon-lib/downloader/snaptype/files.go b/erigon-lib/downloader/snaptype/files.go index 26cebf23e70..4ac92207dc2 100644 --- a/erigon-lib/downloader/snaptype/files.go +++ b/erigon-lib/downloader/snaptype/files.go @@ -152,6 +152,8 @@ func parseFileName(dir, fileName string) (res FileInfo, ok bool) { return } res.To = to * 1_000 + res.TypeString = parts[3] + res.Type, ok = ParseFileType(parts[3]) if !ok { return res, ok @@ -243,6 +245,7 @@ type FileInfo struct { From, To uint64 name, Path, Ext string Type Type + TypeString string // This is for giulio's generic snapshots } func (f FileInfo) TorrentFileExists() (bool, error) { return dir.FileExist(f.Path + ".torrent") } diff --git a/erigon-lib/downloader/snaptype/type.go b/erigon-lib/downloader/snaptype/type.go index 966a67ddf78..cfaff822251 100644 --- a/erigon-lib/downloader/snaptype/type.go +++ b/erigon-lib/downloader/snaptype/type.go @@ -469,6 +469,67 @@ func 
BuildIndex(ctx context.Context, info FileInfo, cfg recsplit.RecSplitArgs, l } } +func BuildIndexWithSnapName(ctx context.Context, info FileInfo, cfg recsplit.RecSplitArgs, lvl log.Lvl, p *background.Progress, walker func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error, logger log.Logger) (err error) { + defer func() { + if rec := recover(); rec != nil { + err = fmt.Errorf("index panic: at=%s, %v, %s", info.Name(), rec, dbg.Stack()) + } + }() + + d, err := seg.NewDecompressor(info.Path) + if err != nil { + return fmt.Errorf("can't open %s for indexing: %w", info.Name(), err) + } + defer d.Close() + + if p != nil { + fname := info.Name() + p.Name.Store(&fname) + p.Total.Store(uint64(d.Count())) + } + cfg.KeyCount = d.Count() + cfg.IndexFile = filepath.Join(info.Dir(), strings.ReplaceAll(info.name, ".seg", ".idx")) + rs, err := recsplit.NewRecSplit(cfg, logger) + if err != nil { + return err + } + rs.LogLvl(lvl) + + defer d.EnableReadAhead().DisableReadAhead() + + for { + g := d.MakeGetter() + var i, offset, nextPos uint64 + word := make([]byte, 0, 4096) + + for g.HasNext() { + word, nextPos = g.Next(word[:0]) + if err := walker(rs, i, offset, word); err != nil { + return err + } + i++ + offset = nextPos + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + } + + if err = rs.Build(ctx); err != nil { + if errors.Is(err, recsplit.ErrCollision) { + logger.Info("Building recsplit. Collision happened. It's ok. Restarting with another salt...", "err", err) + rs.ResetNextSalt() + continue + } + return err + } + + return nil + } +} + func ExtractRange(ctx context.Context, f FileInfo, extractor RangeExtractor, firstKey FirstKeyGetter, chainDB kv.RoDB, chainConfig *chain.Config, tmpDir string, workers int, lvl log.Lvl, logger log.Logger) (uint64, error) { var lastKeyValue uint64 diff --git a/holy.txt b/holy.txt new file mode 100644 index 00000000000..a1e9dee8119 --- /dev/null +++ b/holy.txt @@ -0,0 +1,65 @@ +0 0x607db06200000000000000000000000000000000000000000000000000000000 +1 0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078 +2 0x41420f0000000000000000000000000000000000000000000000000000000000 +3 0x30680b18ada5ceaeb222d0d15bd7058fbc4e1e5e3c002d504adee82cc013a517 +4 0x1158a7769cb0dfc3d015aa03bd76760d57bab8ad2fb77885a07917f1754194cf +5 0x986c1ffc2b1ecc1e36e4cb94ad932fed8ff8d33e8a2ac6729e905f1d503ac9cd +6 0x9d376a65dfa13037b5fe1c2066dfb7764a8e026e62d2fca64d15828a8123fafa +7 0xb6049c0ff9220d02083c30474400cbcb358a6d6804082757553468badccd8a48 +8 0xa4007911445daf00197d2cca907cebf0529773561b702a2136decb52edc87348 +9 0xec2cf9d12a2e597caa03cb8c9ffada78b83fe44c940f8737b076af563c289a67 +10 0x9301000000000000000000000000000000000000000000000000000000000000 +11 0xed774cba995d2b5cd55a05ac5126d4fb24a1dbf7523ebdc0b1dc8f7f02de30d6 +12 0xe7c27b3e6aaccb0c4f42eda4ffadd5f92fc28edfa5ef573f28ab5495f669282a +13 0xc64d19f5ea9664a3f7a8bae036771b68a5d22b646a3f16c34e4c29af0213ecb0 +14 0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220 +15 0x8fe5cf794229f584ba98f5f351b97617f0e3854d6a08e1fb7bc3ee75d67b2469 +16 0x8fe5cf794229f584ba98f5f351b97617f0e3854d6a08e1fb7bc3ee75d67b2469 +17 0x0f00000000000000000000000000000000000000000000000000000000000000 +18 0xa9ff63a992f9e2d2894c8947050fe7360e3811fb9e1468ffde2ca6e4413aa089 +19 0xe74aa97c38fea5a58af5514cd78e2180f0ba62f0709676c62d6e9af62fff77d0 +20 0xa9ff63a992f9e2d2894c8947050fe7360e3811fb9e1468ffde2ca6e4413aa089 +21 0x3d4be5d019ba15ea3ef304a83b8a067f2e79f46a3fac8069306a6c814a0a35eb +22 
diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 0593d8453e7..18169a85573 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -223,6 +223,9 @@ type DirtySegment struct { refcount atomic.Int32 canDelete atomic.Bool + + // caplin state snapshots only + filePath string } type VisibleSegment struct { @@ -231,6 +234,28 @@ type VisibleSegment struct { src *DirtySegment } +func (v *VisibleSegment) Get(globalId uint64) ([]byte, error) { +
idxSlot := v.src.Index() + + if idxSlot == nil { + return nil, nil + } + blockOffset := idxSlot.OrdinalLookup(globalId - idxSlot.BaseDataID()) + + gg := v.src.MakeGetter() + gg.Reset(blockOffset) + if !gg.HasNext() { + return nil, nil + } + var buf []byte + buf, _ = gg.Next(buf) + if len(buf) == 0 { + return nil, nil + } + + return buf, nil +} + func DirtySegmentLess(i, j *DirtySegment) bool { if i.from != j.from { return i.from < j.from diff --git a/turbo/snapshotsync/freezeblocks/caplin_state_snapshots.go b/turbo/snapshotsync/freezeblocks/caplin_state_snapshots.go new file mode 100644 index 00000000000..10e286d91b7 --- /dev/null +++ b/turbo/snapshotsync/freezeblocks/caplin_state_snapshots.go @@ -0,0 +1,667 @@ +// Copyright 2024 The Erigon Authors +// This file is part of Erigon. +// +// Erigon is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Erigon is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with Erigon. If not, see <http://www.gnu.org/licenses/>. + +package freezeblocks + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "math" + "os" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/tidwall/btree" + + "github.com/erigontech/erigon-lib/log/v3" + "github.com/erigontech/erigon-lib/recsplit" + + "github.com/erigontech/erigon-lib/chain/snapcfg" + libcommon "github.com/erigontech/erigon-lib/common" + "github.com/erigontech/erigon-lib/common/background" + "github.com/erigontech/erigon-lib/common/datadir" + "github.com/erigontech/erigon-lib/common/dbg" + "github.com/erigontech/erigon-lib/downloader/snaptype" + "github.com/erigontech/erigon-lib/kv" + "github.com/erigontech/erigon-lib/seg" + + "github.com/erigontech/erigon/cl/clparams" + "github.com/erigontech/erigon/cl/persistence/base_encoding" + "github.com/erigontech/erigon/eth/ethconfig" +) + +func getKvGetterForStateTable(db kv.RoDB, tableName string) KeyValueGetter { + return func(numId uint64) ([]byte, []byte, error) { + var key, value []byte + var err error + if err := db.View(context.TODO(), func(tx kv.Tx) error { + key = base_encoding.Encode64ToBytes4(numId) + value, err = tx.GetOne(tableName, base_encoding.Encode64ToBytes4(numId)) + return err + }); err != nil { + return nil, nil, err + } + return key, value, nil + } +} + +func MakeCaplinStateSnapshotsTypes(db kv.RoDB) SnapshotTypes { + return SnapshotTypes{ + KeyValueGetters: map[string]KeyValueGetter{ + kv.ValidatorEffectiveBalance: getKvGetterForStateTable(db, kv.ValidatorEffectiveBalance), + kv.ValidatorSlashings: getKvGetterForStateTable(db, kv.ValidatorSlashings), + kv.ValidatorBalance: getKvGetterForStateTable(db, kv.ValidatorBalance), + kv.StateEvents: getKvGetterForStateTable(db, kv.StateEvents), + kv.ActiveValidatorIndicies: getKvGetterForStateTable(db, kv.ActiveValidatorIndicies), + kv.StateRoot: getKvGetterForStateTable(db, kv.StateRoot), + kv.BlockRoot: getKvGetterForStateTable(db, kv.BlockRoot), + kv.SlotData: getKvGetterForStateTable(db, kv.SlotData), + kv.EpochData: getKvGetterForStateTable(db, kv.EpochData), + kv.InactivityScores: getKvGetterForStateTable(db,
kv.InactivityScores), + kv.NextSyncCommittee: getKvGetterForStateTable(db, kv.NextSyncCommittee), + kv.CurrentSyncCommittee: getKvGetterForStateTable(db, kv.CurrentSyncCommittee), + kv.Eth1DataVotes: getKvGetterForStateTable(db, kv.Eth1DataVotes), + kv.IntraRandaoMixes: getKvGetterForStateTable(db, kv.IntraRandaoMixes), + kv.RandaoMixes: getKvGetterForStateTable(db, kv.RandaoMixes), + kv.Proposers: getKvGetterForStateTable(db, kv.Proposers), + kv.BalancesDump: getKvGetterForStateTable(db, kv.BalancesDump), + kv.EffectiveBalancesDump: getKvGetterForStateTable(db, kv.EffectiveBalancesDump), + }, + Compression: map[string]bool{}, + } +} + +// value: raw per-table caplin state entries (diffs and dumps) +// slot -> segment offset + +type CaplinStateSnapshots struct { + indicesReady atomic.Bool + segmentsReady atomic.Bool + + Salt uint32 + + dirtySegmentsLock sync.RWMutex + visibleSegmentsLock sync.RWMutex + + // BeaconBlocks *segments + // BlobSidecars *segments + Segments map[string]*segments + snapshotTypes SnapshotTypes + + dir string + tmpdir string + segmentsMax atomic.Uint64 // all types of .seg files are available - up to this number + idxMax atomic.Uint64 // all types of .idx files are available - up to this number + cfg ethconfig.BlocksFreezing + logger log.Logger + // allows for pruning segments - this is the min available segment + segmentsMin atomic.Uint64 + // chain cfg + beaconCfg *clparams.BeaconChainConfig +} + +type KeyValueGetter func(numId uint64) ([]byte, []byte, error) + +type SnapshotTypes struct { + KeyValueGetters map[string]KeyValueGetter + Compression map[string]bool +} + +// NewCaplinStateSnapshots - opens all snapshots. But to simplify everything: +// - it opens snapshots only on App start and they are immutable afterwards +// - all snapshots of a given block range must exist to make that range available +// - gaps are not allowed +// - segments have [from:to) semantics +func NewCaplinStateSnapshots(cfg ethconfig.BlocksFreezing, beaconCfg *clparams.BeaconChainConfig, dirs datadir.Dirs, snapshotTypes SnapshotTypes, logger log.Logger) *CaplinStateSnapshots { + // BeaconBlocks := &segments{ + // DirtySegments: btree.NewBTreeGOptions[*DirtySegment](DirtySegmentLess, btree.Options{Degree: 128, NoLocks: false}), + // } + // BlobSidecars := &segments{ + // DirtySegments: btree.NewBTreeGOptions[*DirtySegment](DirtySegmentLess, btree.Options{Degree: 128, NoLocks: false}), + // } + Segments := make(map[string]*segments) + for k := range snapshotTypes.KeyValueGetters { + Segments[k] = &segments{ + DirtySegments: btree.NewBTreeGOptions[*DirtySegment](DirtySegmentLess, btree.Options{Degree: 128, NoLocks: false}), + } + } + c := &CaplinStateSnapshots{snapshotTypes: snapshotTypes, dir: dirs.SnapCaplin, tmpdir: dirs.Tmp, cfg: cfg, Segments: Segments, logger: logger, beaconCfg: beaconCfg} + c.recalcVisibleFiles() + return c +} + +func (s *CaplinStateSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } +func (s *CaplinStateSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } + +func (s *CaplinStateSnapshots) LogStat(str string) { + s.logger.Info(fmt.Sprintf("[snapshots:%s] Stat", str), + "blocks", libcommon.PrettyCounter(s.SegmentsMax()+1), "indices", libcommon.PrettyCounter(s.IndicesMax()+1)) +} + +func (s *CaplinStateSnapshots) LS() { + if s == nil { + return + } + view := s.View() + defer view.Close() + + for _, roTx := range view.roTxs { + if roTx != nil { + for _, seg := range roTx.VisibleSegments { + s.logger.Info("[agg] ", "f", seg.src.Decompressor.FileName(), "words",
seg.src.Decompressor.Count()) + } + } + } +} + +func (s *CaplinStateSnapshots) SegFileNames(from, to uint64) []string { + view := s.View() + defer view.Close() + + var res []string + + for _, roTx := range view.roTxs { + if roTx == nil { + continue + } + for _, seg := range roTx.VisibleSegments { + if seg.from >= from && seg.to <= to { + res = append(res, seg.src.FileName()) + } + } + } + return res +} + +func (s *CaplinStateSnapshots) BlocksAvailable() uint64 { + return min(s.segmentsMax.Load(), s.idxMax.Load()) +} + +func (s *CaplinStateSnapshots) Close() { + if s == nil { + return + } + s.dirtySegmentsLock.Lock() + defer s.dirtySegmentsLock.Unlock() + + s.closeWhatNotInList(nil) +} + +func (s *CaplinStateSnapshots) openSegIfNeed(sn *DirtySegment, filepath string) error { + if sn.Decompressor != nil { + return nil + } + var err error + sn.Decompressor, err = seg.NewDecompressor(filepath) + if err != nil { + return fmt.Errorf("%w, fileName: %s", err, filepath) + } + return nil +} + +// OpenList stops at the first error when optimistic=false, and keeps opening the remaining files when optimistic=true +func (s *CaplinStateSnapshots) OpenList(fileNames []string, optimistic bool) error { + defer s.recalcVisibleFiles() + + s.dirtySegmentsLock.Lock() + defer s.dirtySegmentsLock.Unlock() + + s.closeWhatNotInList(fileNames) + var segmentsMax uint64 + var segmentsMaxSet bool +Loop: + for _, fName := range fileNames { + f, _, _ := snaptype.ParseFileName(s.dir, fName) + + processed := true + var exists bool + var sn *DirtySegment + + segments, ok := s.Segments[f.TypeString] + if !ok { + continue + } + filePath := filepath.Join(s.dir, fName) + segments.DirtySegments.Walk(func(segments []*DirtySegment) bool { + for _, sn2 := range segments { + if sn2.Decompressor == nil { // it's ok if some segment was not able to open + continue + } + if filePath == sn2.filePath { + sn = sn2 + exists = true + break + } + } + return true + }) + if !exists { + sn = &DirtySegment{ + // segType: f.Type, Unsupported + version: f.Version, + Range: Range{f.From, f.To}, + frozen: snapcfg.IsFrozen(s.cfg.ChainName, f), + filePath: filePath, + } + } + if err := s.openSegIfNeed(sn, filePath); err != nil { + if errors.Is(err, os.ErrNotExist) { + if optimistic { + continue Loop + } else { + break Loop + } + } + if optimistic { + s.logger.Warn("[snapshots] open segment", "err", err) + continue Loop + } else { + return err + } + } + + if !exists { + // it's possible to iterate over a .seg file even without its index, + // so make the segment available even if opening the index fails + segments.DirtySegments.Set(sn) + } + if err := openIdxForCaplinStateIfNeeded(sn, filePath, optimistic); err != nil { + return err + } + // Every successfully opened segment advances progression (the processed flag is vestigial here) + if processed { + if f.To > 0 { + segmentsMax = f.To - 1 + } else { + segmentsMax = 0 + } + segmentsMaxSet = true + } + } + + if segmentsMaxSet { + s.segmentsMax.Store(segmentsMax) + } + s.segmentsReady.Store(true) + return nil +} + +func openIdxForCaplinStateIfNeeded(s *DirtySegment, filePath string, optimistic bool) error { + if s.Decompressor == nil { + return nil + } + err := openIdxIfNeedForCaplinState(s, filePath) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + if optimistic { + log.Warn("[snapshots] open index", "err", err) + } else { + return err + } + } + } + + return nil +} + +func openIdxIfNeedForCaplinState(s *DirtySegment, filePath string) (err error) { + if len(s.indexes) > 0 && s.indexes[0] != nil { + return nil + } + s.indexes = make([]*recsplit.Index, 1) + + filePath = strings.ReplaceAll(filePath,
".seg", ".idx") + index, err := recsplit.OpenIndex(filePath) + if err != nil { + return fmt.Errorf("%w, fileName: %s", err, filePath) + } + + s.indexes[0] = index + + return nil +} + +func isIndexed(s *DirtySegment) bool { + if s.Decompressor == nil { + return false + } + + for _, idx := range s.indexes { + if idx == nil { + return false + } + } + return true +} + +func (s *CaplinStateSnapshots) recalcVisibleFiles() { + defer func() { + s.idxMax.Store(s.idxAvailability()) + s.indicesReady.Store(true) + }() + + s.visibleSegmentsLock.Lock() + defer s.visibleSegmentsLock.Unlock() + + getNewVisibleSegments := func(dirtySegments *btree.BTreeG[*DirtySegment]) []*VisibleSegment { + newVisibleSegments := make([]*VisibleSegment, 0, dirtySegments.Len()) + dirtySegments.Walk(func(segments []*DirtySegment) bool { + for _, sn := range segments { + if sn.canDelete.Load() { + continue + } + if !isIndexed(sn) { + continue + } + for len(newVisibleSegments) > 0 && newVisibleSegments[len(newVisibleSegments)-1].src.isSubSetOf(sn) { + newVisibleSegments[len(newVisibleSegments)-1].src = nil + newVisibleSegments = newVisibleSegments[:len(newVisibleSegments)-1] + } + newVisibleSegments = append(newVisibleSegments, &VisibleSegment{ + Range: sn.Range, + segType: sn.segType, + src: sn, + }) + } + return true + }) + return newVisibleSegments + } + + for _, segments := range s.Segments { + segments.VisibleSegments = getNewVisibleSegments(segments.DirtySegments) + var maxIdx uint64 + if len(segments.VisibleSegments) > 0 { + maxIdx = segments.VisibleSegments[len(segments.VisibleSegments)-1].to - 1 + } + segments.maxVisibleBlock.Store(maxIdx) + } +} + +func (s *CaplinStateSnapshots) idxAvailability() uint64 { + minVisible := uint64(math.MaxUint64) + for _, segments := range s.Segments { + if segments.maxVisibleBlock.Load() < minVisible { + minVisible = segments.maxVisibleBlock.Load() + } + } + if minVisible == math.MaxUint64 { + return 0 + } + return minVisible +} + +func listAllSegFilesInDir(dir string) []string { + files, err := os.ReadDir(dir) + if err != nil { + panic(err) + } + list := make([]string, 0, len(files)) + for _, f := range files { + if f.IsDir() { + continue + } + // check if it's a .seg file + if filepath.Ext(f.Name()) != ".seg" { + continue + } + list = append(list, f.Name()) + } + return list +} + +func (s *CaplinStateSnapshots) OpenFolder() error { + return s.OpenList(listAllSegFilesInDir(s.dir), false) +} + +func (s *CaplinStateSnapshots) closeWhatNotInList(l []string) { + protectFiles := make(map[string]struct{}, len(l)) + for _, fName := range l { + protectFiles[fName] = struct{}{} + } + + for _, segments := range s.Segments { + toClose := make([]*DirtySegment, 0) + segments.DirtySegments.Walk(func(segments []*DirtySegment) bool { + for _, sn := range segments { + if sn.Decompressor == nil { + continue + } + _, name := filepath.Split(sn.FilePath()) + if _, ok := protectFiles[name]; ok { + continue + } + toClose = append(toClose, sn) + } + return true + }) + for _, sn := range toClose { + sn.close() + segments.DirtySegments.Delete(sn) + } + } +} + +type CaplinStateView struct { + s *CaplinStateSnapshots + roTxs map[string]*segmentsRotx + closed bool +} + +func (s *CaplinStateSnapshots) View() *CaplinStateView { + if s == nil { + return nil + } + s.visibleSegmentsLock.RLock() + defer s.visibleSegmentsLock.RUnlock() + + v := &CaplinStateView{s: s, roTxs: make(map[string]*segmentsRotx)} + // BeginRo increments refcount - which is contended + s.dirtySegmentsLock.RLock() + defer 
s.dirtySegmentsLock.RUnlock() + + for k, segments := range s.Segments { + v.roTxs[k] = segments.BeginRotx() + } + return v +} + +func (v *CaplinStateView) Close() { + if v == nil { + return + } + if v.closed { + return + } + for _, segments := range v.roTxs { + segments.Close() + } + v.s = nil + v.closed = true +} + +func (v *CaplinStateView) VisibleSegments(tbl string) []*VisibleSegment { + if v.s == nil || v.s.Segments[tbl] == nil { + return nil + } + return v.s.Segments[tbl].VisibleSegments +} + +func (v *CaplinStateView) VisibleSegment(slot uint64, tbl string) (*VisibleSegment, bool) { + for _, seg := range v.VisibleSegments(tbl) { + if !(slot >= seg.from && slot < seg.to) { + continue + } + return seg, true + } + return nil, false +} + +func dumpCaplinState(ctx context.Context, snapName string, kvGetter KeyValueGetter, fromSlot uint64, toSlot, blocksPerFile uint64, salt uint32, dirs datadir.Dirs, workers int, lvl log.Lvl, logger log.Logger, compress bool) error { + tmpDir, snapDir := dirs.Tmp, dirs.SnapCaplin + + segName := snaptype.BeaconBlocks.FileName(0, fromSlot, toSlot) + // a little bit ugly. + segName = strings.ReplaceAll(segName, "beaconblocks", snapName) + f, _, _ := snaptype.ParseFileName(snapDir, segName) + + compressCfg := seg.DefaultCfg + compressCfg.Workers = workers + sn, err := seg.NewCompressor(ctx, fmt.Sprintf("Snapshots %s", snapName), f.Path, tmpDir, compressCfg, lvl, logger) + if err != nil { + return err + } + defer sn.Close() + + // Generate the .seg file, which is just the list of values for this table, one word per slot. + for i := fromSlot; i < toSlot; i++ { + // read the value for this slot. + _, dump, err := kvGetter(i) + if err != nil { + return err + } + if i%20_000 == 0 { + logger.Log(lvl, fmt.Sprintf("Dumping %s", snapName), "progress", i) + } + if compress { + if err := sn.AddWord(dump); err != nil { + return err + } + } else { + if err := sn.AddUncompressedWord(dump); err != nil { + return err + } + } + } + if sn.Count() != int(blocksPerFile) { + return fmt.Errorf("expected %d blocks, got %d", blocksPerFile, sn.Count()) + } + if err := sn.Compress(); err != nil { + return err + }
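// Editor's sketch (not part of this PR): simpleIdx below builds a recsplit index with Enums and BaseDataID = fromSlot, so the .idx maps each word's ordinal position to its offset in the .seg; a later point lookup is roughly:
//
//	off := idx.OrdinalLookup(slot - idx.BaseDataID())
//	g := d.MakeGetter(); g.Reset(off); word, _ := g.Next(nil)
//
// which is what VisibleSegment.Get in block_snapshots.go does.
+ // Generate .idx file, which is the slot => offset mapping.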
+ p := &background.Progress{} + + // Ugly hack to wait for fsync + time.Sleep(15 * time.Second) + + return simpleIdx(ctx, f, salt, tmpDir, p, lvl, logger) +} + +func simpleIdx(ctx context.Context, sn snaptype.FileInfo, salt uint32, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) { + num := make([]byte, binary.MaxVarintLen64) + cfg := recsplit.RecSplitArgs{ + Enums: true, + BucketSize: 2000, + LeafSize: 8, + TmpDir: tmpDir, + Salt: &salt, + BaseDataID: sn.From, + } + if err := snaptype.BuildIndexWithSnapName(ctx, sn, cfg, log.LvlDebug, p, func(idx *recsplit.RecSplit, i, offset uint64, word []byte) error { + if i%20_000 == 0 { + logger.Log(lvl, "Generating idx for "+sn.Name(), "progress", i) + } + p.Processed.Add(1) + n := binary.PutUvarint(num, i) + if err := idx.AddKey(num[:n], offset); err != nil { + return err + } + return nil + }, logger); err != nil { + return fmt.Errorf("idx: %w", err) + } + + return nil +} + +func (s *CaplinStateSnapshots) DumpCaplinState(ctx context.Context, fromSlot, toSlot, blocksPerFile uint64, salt uint32, dirs datadir.Dirs, workers int, lvl log.Lvl, logger log.Logger) error { + fromSlot = (fromSlot / blocksPerFile) * blocksPerFile + toSlot = (toSlot / blocksPerFile) * blocksPerFile + for snapName, kvGetter := range s.snapshotTypes.KeyValueGetters { + for i := fromSlot; i < toSlot; i += blocksPerFile { + if toSlot-i < blocksPerFile { + break + } + // keep beaconblocks here but whatever.... + to := i + blocksPerFile + logger.Log(lvl, fmt.Sprintf("Dumping %s", snapName), "from", i, "to", to) + if err := dumpCaplinState(ctx, snapName, kvGetter, i, to, blocksPerFile, salt, dirs, workers, lvl, logger, s.snapshotTypes.Compression[snapName]); err != nil { + return err + } + } + } + return nil +} + +func (s *CaplinStateSnapshots) BuildMissingIndices(ctx context.Context, logger log.Logger) error { + if s == nil { + return nil + } + // if !s.segmentsReady.Load() { + // return fmt.Errorf("not all snapshot segments are available") + // } + + // wait for Downloader service to download all expected snapshots + segments, _, err := SegmentsCaplin(s.dir, 0) + if err != nil { + return err + } + noneDone := true + for index := range segments { + segment := segments[index] + // The same slot=>offset mapping is used for both beacon blocks and blob sidecars. + if segment.Type.Enum() != snaptype.CaplinEnums.BeaconBlocks && segment.Type.Enum() != snaptype.CaplinEnums.BlobSidecars { + continue + } + if segment.Type.HasIndexFiles(segment, logger) { + continue + } + p := &background.Progress{} + noneDone = false + if err := BeaconSimpleIdx(ctx, segment, s.Salt, s.tmpdir, p, log.LvlDebug, logger); err != nil { + return err + } + } + if noneDone { + return nil + } + + return s.OpenFolder() +} + +func (s *CaplinStateSnapshots) Get(tbl string, slot uint64) ([]byte, error) { + defer func() { + if rec := recover(); rec != nil { + panic(fmt.Sprintf("Get(%s, %d), %s, %s\n", tbl, slot, rec, dbg.Stack())) + } + }() + + view := s.View() + defer view.Close() + + seg, ok := view.VisibleSegment(slot, tbl) + if !ok { + return nil, nil + } + + return seg.Get(slot) +}
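An end-to-end usage sketch (editor's illustration, not part of the diff), mirroring what the DumpStateSnapshots command in cmd/capcli does; db, dirs, beaconConfig, salt, to, stepSize and slot are assumed to be prepared as in that command:

	stateSn := freezeblocks.NewCaplinStateSnapshots(ethconfig.BlocksFreezing{}, beaconConfig, dirs, freezeblocks.MakeCaplinStateSnapshotsTypes(db), log.Root())
	if err := stateSn.OpenFolder(); err != nil {
		return err
	}
	// Freeze the processed range into per-table .seg files plus their .idx files.
	if err := stateSn.DumpCaplinState(ctx, stateSn.BlocksAvailable(), to, stepSize, salt, dirs, runtime.NumCPU(), log.LvlInfo, log.Root()); err != nil {
		return err
	}
	if err := stateSn.OpenFolder(); err != nil { // pick up the freshly written files
		return err
	}
	v, err := stateSn.Get(kv.BlockRoot, slot) // point-read straight from the segment
	if err != nil {
		return err
	}
	_ = v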