From 4aed10079239c754aa140c4ef037d9a46ad7ddad Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Tue, 28 Mar 2023 18:34:29 +0200 Subject: [PATCH 001/132] Introduced Stakers model --- go.mod | 1 + go.sum | 2 + vms/platformvm/state/models/stakers_model.go | 203 +++++++++++++++++++ 3 files changed, 206 insertions(+) create mode 100644 vms/platformvm/state/models/stakers_model.go diff --git a/go.mod b/go.mod index fc2f9f2dad3e..a23357836583 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( github.com/huin/goupnp v1.0.3 github.com/jackpal/gateway v1.0.6 github.com/jackpal/go-nat-pmp v1.0.2 + github.com/leanovate/gopter v0.2.9 github.com/mr-tron/base58 v1.2.0 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d github.com/onsi/ginkgo/v2 v2.4.0 diff --git a/go.sum b/go.sum index 35c0caade4f0..8bcad7be1771 100644 --- a/go.sum +++ b/go.sum @@ -312,6 +312,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= diff --git a/vms/platformvm/state/models/stakers_model.go b/vms/platformvm/state/models/stakers_model.go new file mode 100644 index 000000000000..fc92c85b316d --- /dev/null +++ b/vms/platformvm/state/models/stakers_model.go @@ -0,0 +1,203 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
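+
+// stakersStorageModel below is a minimal, map-backed implementation of the
+// state.Stakers interface, meant as a reference model for property-based
+// tests: the same sequence of Put/Delete/Get/iterator calls applied to this
+// model and to the production state implementation should yield the same
+// observable results.
+//
+// A typical round-trip check is sketched here (illustrative only; the
+// model's maps must be initialized before use):
+//
+//	model.PutCurrentValidator(staker)
+//	got, err := model.GetCurrentValidator(staker.SubnetID, staker.NodeID)
+//	// expect err == nil and got equal to staker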
+ +package models + +import ( + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/vms/platformvm/state" +) + +var ( + _ state.Stakers = (*stakersStorageModel)(nil) + _ state.StakerIterator = (*stakersStorageIteratorModel)(nil) +) + +type subnetNodeKey struct { + subnetID ids.ID + nodeID ids.NodeID +} + +type stakersStorageModel struct { + currentValidators map[subnetNodeKey]*state.Staker + currentDelegators map[subnetNodeKey](map[ids.ID]*state.Staker) // -> (txID -> Staker) + + pendingValidators map[subnetNodeKey]*state.Staker + pendingDelegators map[subnetNodeKey](map[ids.ID]*state.Staker) // -> (txID -> Staker) +} + +func (m *stakersStorageModel) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { + return getValidator(subnetID, nodeID, m.currentValidators) +} + +func (m *stakersStorageModel) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { + return getValidator(subnetID, nodeID, m.pendingValidators) +} + +func getValidator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey]*state.Staker) (*state.Staker, error) { + key := subnetNodeKey{ + subnetID: subnetID, + nodeID: nodeID, + } + res, found := domain[key] + if !found { + return nil, database.ErrNotFound + } + return res, nil +} + +func (m *stakersStorageModel) PutCurrentValidator(staker *state.Staker) { + putValidator(staker, m.currentValidators) +} + +func (m *stakersStorageModel) PutPendingValidator(staker *state.Staker) { + putValidator(staker, m.pendingValidators) +} + +func putValidator(staker *state.Staker, domain map[subnetNodeKey]*state.Staker) { + key := subnetNodeKey{ + subnetID: staker.SubnetID, + nodeID: staker.NodeID, + } + domain[key] = staker +} + +func (m *stakersStorageModel) DeleteCurrentValidator(staker *state.Staker) { + deleteValidator(staker, m.currentValidators) +} + +func (m *stakersStorageModel) DeletePendingValidator(staker *state.Staker) { + deleteValidator(staker, m.pendingValidators) +} + +func deleteValidator(staker *state.Staker, domain map[subnetNodeKey]*state.Staker) { + key := subnetNodeKey{ + subnetID: staker.SubnetID, + nodeID: staker.NodeID, + } + delete(domain, key) +} + +func (m *stakersStorageModel) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (state.StakerIterator, error) { + return getDelegatorIterator(subnetID, nodeID, m.currentDelegators), nil +} + +func (m *stakersStorageModel) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (state.StakerIterator, error) { + return getDelegatorIterator(subnetID, nodeID, m.pendingDelegators), nil +} + +func getDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey](map[ids.ID]*state.Staker)) state.StakerIterator { + key := subnetNodeKey{ + subnetID: subnetID, + nodeID: nodeID, + } + dels, found := domain[key] + if !found { + return state.EmptyIterator + } + + sortedDels := maps.Values(dels) + utils.Sort(sortedDels) + return &stakersStorageIteratorModel{ + current: nil, + sortedStakers: sortedDels, + } +} + +func (m *stakersStorageModel) PutCurrentDelegator(staker *state.Staker) { + putDelegator(staker, m.currentDelegators) +} + +func (m *stakersStorageModel) PutPendingDelegator(staker *state.Staker) { + putDelegator(staker, m.pendingDelegators) +} + +func putDelegator(staker *state.Staker, domain map[subnetNodeKey]map[ids.ID]*state.Staker) { + key := subnetNodeKey{ + subnetID: staker.SubnetID, + 
nodeID: staker.NodeID, + } + domain[key][staker.TxID] = staker +} + +func (m *stakersStorageModel) DeleteCurrentDelegator(staker *state.Staker) { + deleteDelegator(staker, m.currentDelegators) +} + +func (m *stakersStorageModel) DeletePendingDelegator(staker *state.Staker) { + deleteDelegator(staker, m.pendingDelegators) +} + +func deleteDelegator(staker *state.Staker, domain map[subnetNodeKey]map[ids.ID]*state.Staker) { + key := subnetNodeKey{ + subnetID: staker.SubnetID, + nodeID: staker.NodeID, + } + + dels, found := domain[key] + if !found { + return + } + delete(dels, staker.TxID) + + // prune + if len(dels) == 0 { + delete(domain, key) + } +} + +func (m *stakersStorageModel) GetCurrentStakerIterator() (state.StakerIterator, error) { + return getCurrentStakerIterator(m.currentValidators, m.currentDelegators), nil +} + +func (m *stakersStorageModel) GetPendingStakerIterator() (state.StakerIterator, error) { + return getCurrentStakerIterator(m.pendingValidators, m.pendingDelegators), nil +} + +func getCurrentStakerIterator( + validators map[subnetNodeKey]*state.Staker, + delegators map[subnetNodeKey](map[ids.ID]*state.Staker), +) state.StakerIterator { + allStakers := maps.Values(validators) + for _, dels := range delegators { + allStakers = append(allStakers, maps.Values(dels)...) + } + utils.Sort(allStakers) + return &stakersStorageIteratorModel{ + current: nil, + sortedStakers: allStakers, + } +} + +type stakersStorageIteratorModel struct { + current *state.Staker + + // sortedStakers contains the sorted list of stakers + // as it should be returned by iteration. + // sortedStakers must be sorted upon stakersStorageIteratorModel creation. + // Stakers are evicted from sortedStakers as Value() is called. + sortedStakers []*state.Staker +} + +func (i *stakersStorageIteratorModel) Next() bool { + if len(i.sortedStakers) == 0 { + return false + } + + i.current = i.sortedStakers[0] + i.sortedStakers = i.sortedStakers[1:] + return true +} + +func (i *stakersStorageIteratorModel) Value() *state.Staker { + return i.current +} + +func (i *stakersStorageIteratorModel) Release() { + i.current = nil + i.sortedStakers = nil +} From 94ddc256f8131f26c87716b311d6dc20cf97ed0a Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Tue, 28 Mar 2023 20:02:52 +0200 Subject: [PATCH 002/132] introduced staker generator --- .../state/models/stakers_generator_test.go | 200 ++++++++++++++++++ ...kers_model.go => stakers_storage_model.go} | 0 2 files changed, 200 insertions(+) create mode 100644 vms/platformvm/state/models/stakers_generator_test.go rename vms/platformvm/state/models/{stakers_model.go => stakers_storage_model.go} (100%) diff --git a/vms/platformvm/state/models/stakers_generator_test.go b/vms/platformvm/state/models/stakers_generator_test.go new file mode 100644 index 000000000000..bb4bd0f75d8f --- /dev/null +++ b/vms/platformvm/state/models/stakers_generator_test.go @@ -0,0 +1,200 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
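+
+// This file builds a gopter generator for state.Staker values in two
+// FlatMap stages: stakerMicroData (random start time, duration and
+// priority) is expanded into stakerTimeData (mutually consistent
+// StartTime/EndTime/NextTime values), which is in turn expanded into a
+// full state.Staker. A property wired to the generator follows the shape
+// used by the tests below (sketch; an empty returned string means the
+// property holds):
+//
+//	properties := gopter.NewProperties(nil)
+//	properties.Property("example", prop.ForAll(
+//		func(s state.Staker) string { return "" },
+//		StakerGenerator,
+//	))
+//	properties.TestingRun(t)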
+
+package models
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+	"time"
+
+	blst "github.com/supranational/blst/bindings/go"
+
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/utils/crypto/bls"
+	"github.com/ava-labs/avalanchego/vms/platformvm/state"
+	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
+	"github.com/leanovate/gopter"
+	"github.com/leanovate/gopter/gen"
+	"github.com/leanovate/gopter/prop"
+)
+
+var StakerGenerator = genStakerTimeData.FlatMap(
+	func(v interface{}) gopter.Gen {
+		macro := v.(stakerTimeData)
+
+		return gen.Struct(reflect.TypeOf(state.Staker{}), map[string]gopter.Gen{
+			"TxID":            genID,
+			"NodeID":          genNodeID,
+			"PublicKey":       genBlsKey,
+			"SubnetID":        genID,
+			"Weight":          gen.UInt64(),
+			"StartTime":       gen.Const(macro.StartTime),
+			"EndTime":         gen.Const(macro.EndTime),
+			"PotentialReward": gen.UInt64(),
+			"NextTime":        gen.Const(macro.NextTime),
+			"Priority":        gen.Const(macro.Priority),
+		})
+	},
+	reflect.TypeOf(stakerTimeData{}),
+)
+
+func TestGeneratedStakersValidity(t *testing.T) {
+	properties := gopter.NewProperties(nil)
+
+	properties.Property("EndTime never before StartTime", prop.ForAll(
+		func(s state.Staker) string {
+			if s.EndTime.Before(s.StartTime) {
+				return fmt.Sprintf("endTime %v before startTime %v, staker %v",
+					s.EndTime, s.StartTime, s)
+			}
+			return ""
+		},
+		StakerGenerator,
+	))
+
+	properties.Property("NextTime coherent with priority", prop.ForAll(
+		func(s state.Staker) string {
+			switch p := s.Priority; p {
+			case txs.PrimaryNetworkDelegatorApricotPendingPriority,
+				txs.PrimaryNetworkDelegatorBanffPendingPriority,
+				txs.SubnetPermissionlessDelegatorPendingPriority,
+				txs.PrimaryNetworkValidatorPendingPriority,
+				txs.SubnetPermissionlessValidatorPendingPriority,
+				txs.SubnetPermissionedValidatorPendingPriority:
+				if !s.NextTime.Equal(s.StartTime) {
+					return fmt.Sprintf("pending staker has nextTime %v different from startTime %v, staker %v",
+						s.NextTime, s.StartTime, s)
+				}
+				return ""
+
+			case txs.PrimaryNetworkDelegatorCurrentPriority,
+				txs.SubnetPermissionlessDelegatorCurrentPriority,
+				txs.PrimaryNetworkValidatorCurrentPriority,
+				txs.SubnetPermissionlessValidatorCurrentPriority,
+				txs.SubnetPermissionedValidatorCurrentPriority:
+				if !s.NextTime.Equal(s.EndTime) {
+					return fmt.Sprintf("current staker has nextTime %v different from endTime %v, staker %v",
+						s.NextTime, s.EndTime, s)
+				}
+				return ""
+
+			default:
+				return fmt.Sprintf("priority %v unhandled in test", p)
+			}
+		},
+		StakerGenerator,
+	))
+
+	properties.TestingRun(t)
+}
+
+// stakerTimeData holds a Staker's time-related data in order to generate it
+// while fulfilling the following constraints:
+// 1. EndTime >= StartTime
+// 2. NextTime == EndTime for current priorities
+// 3. 
NextTime == StartTime for pending priorities +type stakerTimeData struct { + StartTime time.Time + EndTime time.Time + Priority txs.Priority + NextTime time.Time +} + +var genStakerTimeData = genStakerMicroData.FlatMap( + func(v interface{}) gopter.Gen { + micro := v.(stakerMicroData) + + var ( + startTime = micro.StartTime + endTime = micro.StartTime.Add(time.Duration(micro.Duration * int64(time.Hour))) + priority = micro.Priority + ) + + startTimeGen := gen.Const(startTime) + endTimeGen := gen.Const(endTime) + priorityGen := gen.Const(priority) + var nextTimeGen gopter.Gen + if priority == txs.SubnetPermissionedValidatorCurrentPriority || + priority == txs.SubnetPermissionlessDelegatorCurrentPriority || + priority == txs.SubnetPermissionlessValidatorCurrentPriority || + priority == txs.PrimaryNetworkDelegatorCurrentPriority || + priority == txs.PrimaryNetworkValidatorCurrentPriority { + nextTimeGen = gen.Const(endTime) + } else { + nextTimeGen = gen.Const(startTime) + } + + return gen.Struct(reflect.TypeOf(stakerTimeData{}), map[string]gopter.Gen{ + "StartTime": startTimeGen, + "EndTime": endTimeGen, + "Priority": priorityGen, + "NextTime": nextTimeGen, + }) + }, + reflect.TypeOf(stakerMicroData{}), +) + +// stakerMicroData holds seed attributes to generate stakerMacroData +type stakerMicroData struct { + StartTime time.Time + Duration int64 + Priority txs.Priority +} + +// genStakerMicroData is the helper to generate stakerMicroData +var genStakerMicroData = gen.Struct(reflect.TypeOf(&stakerMicroData{}), map[string]gopter.Gen{ + "StartTime": gen.Time(), + "Duration": gen.Int64Range(1, 365*24), + "Priority": gen.OneConstOf( + txs.PrimaryNetworkDelegatorApricotPendingPriority, + txs.PrimaryNetworkValidatorPendingPriority, + txs.PrimaryNetworkDelegatorBanffPendingPriority, + txs.SubnetPermissionlessValidatorPendingPriority, + txs.SubnetPermissionlessDelegatorPendingPriority, + txs.SubnetPermissionedValidatorPendingPriority, + txs.SubnetPermissionedValidatorCurrentPriority, + txs.SubnetPermissionlessDelegatorCurrentPriority, + txs.SubnetPermissionlessValidatorCurrentPriority, + txs.PrimaryNetworkDelegatorCurrentPriority, + txs.PrimaryNetworkValidatorCurrentPriority, + ), +}) + +var genBlsKey = gen.SliceOfN(lengthID, gen.UInt8()).FlatMap( + func(v interface{}) gopter.Gen { + byteSlice := v.([]byte) + sk := blst.KeyGen(byteSlice) + pk := bls.PublicFromSecretKey(sk) + return gen.Const(pk) + }, + reflect.TypeOf([]byte{}), +) + +const ( + lengthID = 32 + lengthNodeID = 20 +) + +// genID is the helper generator for ids.ID objects +var genID = gen.SliceOfN(lengthID, gen.UInt8()).FlatMap( + func(v interface{}) gopter.Gen { + byteSlice := v.([]byte) + var byteArray [lengthID]byte + copy(byteArray[:], byteSlice) + return gen.Const(ids.ID(byteArray)) + }, + reflect.TypeOf([]byte{}), +) + +// genID is the helper generator for ids.NodeID objects +var genNodeID = gen.SliceOfN(lengthNodeID, gen.UInt8()).FlatMap( + func(v interface{}) gopter.Gen { + byteSlice := v.([]byte) + var byteArray [lengthNodeID]byte + copy(byteArray[:], byteSlice) + return gen.Const(ids.NodeID(byteArray)) + }, + reflect.TypeOf([]byte{}), +) diff --git a/vms/platformvm/state/models/stakers_model.go b/vms/platformvm/state/models/stakers_storage_model.go similarity index 100% rename from vms/platformvm/state/models/stakers_model.go rename to vms/platformvm/state/models/stakers_storage_model.go From 43cf22a7e834c46c97a8a5f812119d08fae101d8 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 29 Mar 2023 14:22:54 +0200 Subject: 
[PATCH 003/132] stakers store model property tests

---
 .../state/models/stakers_generator_test.go   | 213 ++++++----
 .../state/models/stakers_storage_model.go    |  17 +-
 .../models/stakers_storage_model_test.go     | 367 ++++++++++++++++++
 3 files changed, 527 insertions(+), 70 deletions(-)
 create mode 100644 vms/platformvm/state/models/stakers_storage_model_test.go

diff --git a/vms/platformvm/state/models/stakers_generator_test.go b/vms/platformvm/state/models/stakers_generator_test.go
index bb4bd0f75d8f..7e64c158371b 100644
--- a/vms/platformvm/state/models/stakers_generator_test.go
+++ b/vms/platformvm/state/models/stakers_generator_test.go
@@ -20,25 +20,36 @@ import (
 	"github.com/leanovate/gopter/prop"
 )
 
-var StakerGenerator = genStakerTimeData.FlatMap(
-	func(v interface{}) gopter.Gen {
-		macro := v.(stakerTimeData)
-
-		return gen.Struct(reflect.TypeOf(state.Staker{}), map[string]gopter.Gen{
-			"TxID":            genID,
-			"NodeID":          genNodeID,
-			"PublicKey":       genBlsKey,
-			"SubnetID":        genID,
-			"Weight":          gen.UInt64(),
-			"StartTime":       gen.Const(macro.StartTime),
-			"EndTime":         gen.Const(macro.EndTime),
-			"PotentialReward": gen.UInt64(),
-			"NextTime":        gen.Const(macro.NextTime),
-			"Priority":        gen.Const(macro.Priority),
-		})
-	},
-	reflect.TypeOf(stakerTimeData{}),
-)
+func stakerGenerator(prio *priorityType, subnet *ids.ID, nodeID *ids.NodeID) gopter.Gen {
+	return genStakerTimeData(prio).FlatMap(
+		func(v interface{}) gopter.Gen {
+			macro := v.(stakerTimeData)
+
+			genStakerSubnetID := genID
+			genStakerNodeID := genNodeID
+			if subnet != nil {
+				genStakerSubnetID = gen.Const(*subnet)
+			}
+			if nodeID != nil {
+				genStakerNodeID = gen.Const(*nodeID)
+			}
+
+			return gen.Struct(reflect.TypeOf(state.Staker{}), map[string]gopter.Gen{
+				"TxID":            genID,
+				"NodeID":          genStakerNodeID,
+				"PublicKey":       genBlsKey,
+				"SubnetID":        genStakerSubnetID,
+				"Weight":          gen.UInt64(),
+				"StartTime":       gen.Const(macro.StartTime),
+				"EndTime":         gen.Const(macro.EndTime),
+				"PotentialReward": gen.UInt64(),
+				"NextTime":        gen.Const(macro.NextTime),
+				"Priority":        gen.Const(macro.Priority),
+			})
+		},
+		reflect.TypeOf(stakerTimeData{}),
+	)
+}
 
 func TestGeneratedStakersValidity(t *testing.T) {
 	properties := gopter.NewProperties(nil)
@@ -51,7 +62,7 @@ func TestGeneratedStakersValidity(t *testing.T) {
 			}
 			return ""
 		},
-		StakerGenerator,
+		stakerGenerator(nil, nil, nil),
 	))
 
 	properties.Property("NextTime coherent with priority", prop.ForAll(
@@ -84,7 +95,24 @@ func TestGeneratedStakersValidity(t *testing.T) {
 			return fmt.Sprintf("priority %v unhandled in test", p)
 		}
 	},
-		StakerGenerator,
+		stakerGenerator(nil, nil, nil),
+	))
+
+	subnetID := ids.GenerateTestID()
+	nodeID := ids.GenerateTestNodeID()
+	properties.Property("subnetID and nodeID are set as requested", prop.ForAll(
+		func(s state.Staker) string {
+			if s.SubnetID != subnetID {
+				return fmt.Sprintf("unexpected subnetID, expected %v, got %v",
+					subnetID, s.SubnetID)
+			}
+			if s.NodeID != nodeID {
+				return fmt.Sprintf("unexpected nodeID, expected %v, got %v",
+					nodeID, s.NodeID)
+			}
+			return ""
+		},
+		stakerGenerator(nil, &subnetID, &nodeID),
 	))
 
 	properties.TestingRun(t)
@@ -102,39 +130,41 @@ type stakerTimeData struct {
 	NextTime time.Time
 }
 
-var genStakerTimeData = genStakerMicroData.FlatMap(
-	func(v interface{}) gopter.Gen {
-		micro := v.(stakerMicroData)
+func genStakerTimeData(prio *priorityType) gopter.Gen {
+	return genStakerMicroData(prio).FlatMap(
+		func(v interface{}) gopter.Gen {
+			micro := v.(stakerMicroData)
 
-		var (
-			startTime = micro.StartTime
-			endTime   = micro.StartTime.Add(time.Duration(micro.Duration * 
int64(time.Hour))) - priority = micro.Priority - ) + var ( + startTime = micro.StartTime + endTime = micro.StartTime.Add(time.Duration(micro.Duration * int64(time.Hour))) + priority = micro.Priority + ) - startTimeGen := gen.Const(startTime) - endTimeGen := gen.Const(endTime) - priorityGen := gen.Const(priority) - var nextTimeGen gopter.Gen - if priority == txs.SubnetPermissionedValidatorCurrentPriority || - priority == txs.SubnetPermissionlessDelegatorCurrentPriority || - priority == txs.SubnetPermissionlessValidatorCurrentPriority || - priority == txs.PrimaryNetworkDelegatorCurrentPriority || - priority == txs.PrimaryNetworkValidatorCurrentPriority { - nextTimeGen = gen.Const(endTime) - } else { - nextTimeGen = gen.Const(startTime) - } - - return gen.Struct(reflect.TypeOf(stakerTimeData{}), map[string]gopter.Gen{ - "StartTime": startTimeGen, - "EndTime": endTimeGen, - "Priority": priorityGen, - "NextTime": nextTimeGen, - }) - }, - reflect.TypeOf(stakerMicroData{}), -) + startTimeGen := gen.Const(startTime) + endTimeGen := gen.Const(endTime) + priorityGen := gen.Const(priority) + var nextTimeGen gopter.Gen + if priority == txs.SubnetPermissionedValidatorCurrentPriority || + priority == txs.SubnetPermissionlessDelegatorCurrentPriority || + priority == txs.SubnetPermissionlessValidatorCurrentPriority || + priority == txs.PrimaryNetworkDelegatorCurrentPriority || + priority == txs.PrimaryNetworkValidatorCurrentPriority { + nextTimeGen = gen.Const(endTime) + } else { + nextTimeGen = gen.Const(startTime) + } + + return gen.Struct(reflect.TypeOf(stakerTimeData{}), map[string]gopter.Gen{ + "StartTime": startTimeGen, + "EndTime": endTimeGen, + "Priority": priorityGen, + "NextTime": nextTimeGen, + }) + }, + reflect.TypeOf(stakerMicroData{}), + ) +} // stakerMicroData holds seed attributes to generate stakerMacroData type stakerMicroData struct { @@ -144,23 +174,68 @@ type stakerMicroData struct { } // genStakerMicroData is the helper to generate stakerMicroData -var genStakerMicroData = gen.Struct(reflect.TypeOf(&stakerMicroData{}), map[string]gopter.Gen{ - "StartTime": gen.Time(), - "Duration": gen.Int64Range(1, 365*24), - "Priority": gen.OneConstOf( - txs.PrimaryNetworkDelegatorApricotPendingPriority, - txs.PrimaryNetworkValidatorPendingPriority, - txs.PrimaryNetworkDelegatorBanffPendingPriority, - txs.SubnetPermissionlessValidatorPendingPriority, - txs.SubnetPermissionlessDelegatorPendingPriority, - txs.SubnetPermissionedValidatorPendingPriority, - txs.SubnetPermissionedValidatorCurrentPriority, - txs.SubnetPermissionlessDelegatorCurrentPriority, - txs.SubnetPermissionlessValidatorCurrentPriority, - txs.PrimaryNetworkDelegatorCurrentPriority, - txs.PrimaryNetworkValidatorCurrentPriority, - ), -}) +func genStakerMicroData(prio *priorityType) gopter.Gen { + return gen.Struct(reflect.TypeOf(&stakerMicroData{}), map[string]gopter.Gen{ + "StartTime": gen.Time(), + "Duration": gen.Int64Range(1, 365*24), + "Priority": genPriority(prio), + }) +} + +type priorityType uint8 + +const ( + currentValidator priorityType = iota + 1 + currentDelegator + pendingValidator + pendingDelegator +) + +func genPriority(p *priorityType) gopter.Gen { + if p == nil { + return gen.OneConstOf( + txs.PrimaryNetworkDelegatorApricotPendingPriority, + txs.PrimaryNetworkValidatorPendingPriority, + txs.PrimaryNetworkDelegatorBanffPendingPriority, + txs.SubnetPermissionlessValidatorPendingPriority, + txs.SubnetPermissionlessDelegatorPendingPriority, + txs.SubnetPermissionedValidatorPendingPriority, + 
txs.SubnetPermissionedValidatorCurrentPriority, + txs.SubnetPermissionlessDelegatorCurrentPriority, + txs.SubnetPermissionlessValidatorCurrentPriority, + txs.PrimaryNetworkDelegatorCurrentPriority, + txs.PrimaryNetworkValidatorCurrentPriority, + ) + } + + switch *p { + case currentValidator: + return gen.OneConstOf( + txs.SubnetPermissionedValidatorCurrentPriority, + txs.SubnetPermissionlessValidatorCurrentPriority, + txs.PrimaryNetworkValidatorCurrentPriority, + ) + case currentDelegator: + return gen.OneConstOf( + txs.SubnetPermissionlessDelegatorCurrentPriority, + txs.PrimaryNetworkDelegatorCurrentPriority, + ) + case pendingValidator: + return gen.OneConstOf( + txs.PrimaryNetworkValidatorPendingPriority, + txs.SubnetPermissionlessValidatorPendingPriority, + txs.SubnetPermissionedValidatorPendingPriority, + ) + case pendingDelegator: + return gen.OneConstOf( + txs.PrimaryNetworkDelegatorApricotPendingPriority, + txs.PrimaryNetworkDelegatorBanffPendingPriority, + txs.SubnetPermissionlessDelegatorPendingPriority, + ) + default: + panic("unhandled priority type") + } +} var genBlsKey = gen.SliceOfN(lengthID, gen.UInt8()).FlatMap( func(v interface{}) gopter.Gen { diff --git a/vms/platformvm/state/models/stakers_storage_model.go b/vms/platformvm/state/models/stakers_storage_model.go index fc92c85b316d..b5e16ba17a75 100644 --- a/vms/platformvm/state/models/stakers_storage_model.go +++ b/vms/platformvm/state/models/stakers_storage_model.go @@ -30,6 +30,15 @@ type stakersStorageModel struct { pendingDelegators map[subnetNodeKey](map[ids.ID]*state.Staker) // -> (txID -> Staker) } +func newStakersStorageModel() *stakersStorageModel { + return &stakersStorageModel{ + currentValidators: make(map[subnetNodeKey]*state.Staker), + currentDelegators: make(map[subnetNodeKey]map[ids.ID]*state.Staker), + pendingValidators: make(map[subnetNodeKey]*state.Staker), + pendingDelegators: make(map[subnetNodeKey]map[ids.ID]*state.Staker), + } +} + func (m *stakersStorageModel) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { return getValidator(subnetID, nodeID, m.currentValidators) } @@ -121,7 +130,13 @@ func putDelegator(staker *state.Staker, domain map[subnetNodeKey]map[ids.ID]*sta subnetID: staker.SubnetID, nodeID: staker.NodeID, } - domain[key][staker.TxID] = staker + + ls, found := domain[key] + if !found { + ls = make(map[ids.ID]*state.Staker) + domain[key] = ls + } + ls[staker.TxID] = staker } func (m *stakersStorageModel) DeleteCurrentDelegator(staker *state.Staker) { diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/models/stakers_storage_model_test.go new file mode 100644 index 000000000000..9313b671f845 --- /dev/null +++ b/vms/platformvm/state/models/stakers_storage_model_test.go @@ -0,0 +1,367 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
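+
+// The properties below exercise the storage model in isolation: lookups
+// must return database.ErrNotFound before insertion, deletes of unknown
+// stakers must be no-ops, and every iterator must reflect exactly the
+// stakers currently stored. Establishing this first lets the model serve
+// as a trustworthy reference when the same operations are run against the
+// production state implementation.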
+ +package models + +import ( + "fmt" + "reflect" + "testing" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" +) + +func TestStakersStorageMode(t *testing.T) { + properties := gopter.NewProperties(nil) + + properties.Property("some current validator ops", prop.ForAll( + func(s state.Staker) string { + store := newStakersStorageModel() + + // no staker before insertion + _, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + // it's fine deleting unknown validator + store.DeleteCurrentValidator(&s) + + currIT, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + // staker after insertion + store.PutCurrentValidator(&s) + retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&s, retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + + currIT, err = store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !currIT.Next() { + return "expected non-empty iterator, got no elements" + } + if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + currIT.Release() + + // no staker after deletion + store.DeleteCurrentValidator(&s) + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + currIT, err = store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + return "" + }, + stakerGenerator(nil, nil, nil), + )) + + properties.Property("some pending validator ops", prop.ForAll( + func(s state.Staker) string { + store := newStakersStorageModel() + + // no staker before insertion + _, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + // it's fine deleting unknown validator + store.DeletePendingValidator(&s) + + currIT, err := store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + // staker after insertion + store.PutPendingValidator(&s) + retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&s, retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + + 
currIT, err = store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !currIT.Next() { + return "expected non-empty iterator, got no elements" + } + if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + currIT.Release() + + // no staker after deletion + store.DeletePendingValidator(&s) + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + currIT, err = store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + return "" + }, + stakerGenerator(nil, nil, nil), + )) + + var ( + valPrio = currentValidator + delPrio = currentDelegator + subnetID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + ) + properties.Property("some current delegators ops", prop.ForAll( + func(val state.Staker, dels []state.Staker) string { + store := newStakersStorageModel() + + // store validator + store.PutCurrentValidator(&val) + + // check validator - version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&val, retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + + // check validator - version 2 + valIt, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !valIt.Next() { + return "expected non-empty iterator, got no elements" + } + if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + valIt.Release() + + // store delegators + for _, del := range dels { + cpy := del + store.PutCurrentDelegator(&cpy) + } + + // check delegators - version 1 + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + for delIt.Next() { + found := false + for _, del := range dels { + if reflect.DeepEqual(delIt.Value(), &del) { + found = true + break + } + } + if !found { + return fmt.Sprintf("found extra delegator %v", delIt.Value()) + } + } + delIt.Release() + + // check delegators - version 2 + for _, del := range dels { + found := false + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + for delIt.Next() { + if reflect.DeepEqual(delIt.Value(), &del) { + found = true + break + } + } + delIt.Release() + + if !found { + return fmt.Sprintf("missing delegator %v", del) + } + } + + // delege delegators + for _, del := range dels { + cpy := del + store.DeleteCurrentDelegator(&cpy) + + // check deleted delegator is not there anymore + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + + found := false + for 
delIt.Next() { + if reflect.DeepEqual(delIt.Value(), &del) { + found = true + break + } + } + delIt.Release() + if found { + return fmt.Sprintf("found deleted delegator %v", del) + } + } + + return "" + }, + stakerGenerator(&valPrio, &subnetID, &nodeID), + gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), + )) + + properties.Property("some pending delegators ops", prop.ForAll( + func(val state.Staker, dels []state.Staker) string { + store := newStakersStorageModel() + + // store validator + store.PutCurrentValidator(&val) + + // check validator - version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&val, retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + + // check validator - version 2 + valIt, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !valIt.Next() { + return "expected non-empty iterator, got no elements" + } + if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + valIt.Release() + + // store delegators + for _, del := range dels { + cpy := del + store.PutPendingDelegator(&cpy) + } + + // check delegators - version 1 + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + for delIt.Next() { + found := false + for _, del := range dels { + if reflect.DeepEqual(delIt.Value(), &del) { + found = true + break + } + } + if !found { + return fmt.Sprintf("found extra delegator %v", delIt.Value()) + } + } + delIt.Release() + + // check delegators - version 2 + for _, del := range dels { + found := false + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + for delIt.Next() { + if reflect.DeepEqual(delIt.Value(), &del) { + found = true + break + } + } + delIt.Release() + + if !found { + return fmt.Sprintf("missing delegator %v", del) + } + } + + // delege delegators + for _, del := range dels { + cpy := del + store.DeletePendingDelegator(&cpy) + + // check deleted delegator is not there anymore + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + + found := false + for delIt.Next() { + if reflect.DeepEqual(delIt.Value(), &del) { + found = true + break + } + } + delIt.Release() + if found { + return fmt.Sprintf("found deleted delegator %v", del) + } + } + + return "" + }, + stakerGenerator(&valPrio, &subnetID, &nodeID), + gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), + )) + + properties.TestingRun(t) +} From d0ea2a04fac17c565e70b31a231d0d716e243eee Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 30 Mar 2023 09:37:06 +0200 Subject: [PATCH 004/132] chain state property tests --- vms/platformvm/state/models/helpers_test.go | 131 ++++ .../state/models/stakers_ops_test.go | 725 ++++++++++++++++++ .../models/stakers_storage_model_test.go | 367 --------- 3 files changed, 856 insertions(+), 367 deletions(-) create mode 100644 
vms/platformvm/state/models/helpers_test.go create mode 100644 vms/platformvm/state/models/stakers_ops_test.go delete mode 100644 vms/platformvm/state/models/stakers_storage_model_test.go diff --git a/vms/platformvm/state/models/helpers_test.go b/vms/platformvm/state/models/helpers_test.go new file mode 100644 index 000000000000..396d1d4074bb --- /dev/null +++ b/vms/platformvm/state/models/helpers_test.go @@ -0,0 +1,131 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package models + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/chains" + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/platformvm/api" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + xChainID = ids.Empty.Prefix(0) + cChainID = ids.Empty.Prefix(1) + avaxAssetID = ids.ID{'y', 'e', 'e', 't'} + + defaultMinStakingDuration = 24 * time.Hour + defaultMaxStakingDuration = 365 * 24 * time.Hour + defaultGenesisTime = time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC) + defaultValidateStartTime = defaultGenesisTime + defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) + defaultTxFee = uint64(100) + + testNetworkID = 10 // To be used in tests +) + +func buildChainState() (state.State, error) { + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + baseDB := versiondb.New(baseDBManager.Current().Database) + + cfg := defaultConfig() + + ctx := snow.DefaultContextTest() + ctx.NetworkID = 10 + ctx.XChainID = xChainID + ctx.CChainID = cChainID + ctx.AVAXAssetID = avaxAssetID + + genesisBytes, err := buildGenesisTest(ctx) + if err != nil { + return nil, err + } + + rewardsCalc := reward.NewCalculator(cfg.RewardConfig) + return state.New( + baseDB, + genesisBytes, + prometheus.NewRegistry(), + cfg, + ctx, + metrics.Noop, + rewardsCalc, + &utils.Atomic[bool]{}, + ) +} + +func defaultConfig() *config.Config { + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) + return &config.Config{ + Chains: chains.TestManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + Validators: vdrs, + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + MinValidatorStake: 5 * units.MilliAvax, + MaxValidatorStake: 500 * units.MilliAvax, + MinDelegatorStake: 1 * units.MilliAvax, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: reward.Config{ + MaxConsumptionRate: .12 * reward.PercentDenominator, + MinConsumptionRate: .10 * reward.PercentDenominator, + MintingPeriod: 365 * 24 * time.Hour, + SupplyCap: 720 * units.MegaAvax, + }, + ApricotPhase3Time: 
defaultValidateEndTime,
+		ApricotPhase5Time:      defaultValidateEndTime,
+		BanffTime:              time.Time{}, // neglecting fork ordering for these package tests
+	}
+}
+
+func buildGenesisTest(ctx *snow.Context) ([]byte, error) {
+	// no UTXOs nor validators in this genesis
+	genesisUTXOs := make([]api.UTXO, 0)
+	genesisValidators := make([]api.PermissionlessValidator, 0)
+	buildGenesisArgs := api.BuildGenesisArgs{
+		NetworkID:     json.Uint32(testNetworkID),
+		AvaxAssetID:   ctx.AVAXAssetID,
+		UTXOs:         genesisUTXOs,
+		Validators:    genesisValidators,
+		Chains:        nil,
+		Time:          json.Uint64(defaultGenesisTime.Unix()),
+		InitialSupply: json.Uint64(360 * units.MegaAvax),
+		Encoding:      formatting.Hex,
+	}
+
+	buildGenesisResponse := api.BuildGenesisReply{}
+	platformvmSS := api.StaticService{}
+	if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil {
+		return nil, fmt.Errorf("problem while building platform chain's genesis state: %w", err)
+	}
+
+	genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return genesisBytes, nil
+}
diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go
new file mode 100644
index 000000000000..99ed4a421bb1
--- /dev/null
+++ b/vms/platformvm/state/models/stakers_ops_test.go
@@ -0,0 +1,725 @@
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package models
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/ava-labs/avalanchego/database"
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/vms/platformvm/state"
+	"github.com/leanovate/gopter"
+	"github.com/leanovate/gopter/gen"
+	"github.com/leanovate/gopter/prop"
+)
+
+var errNonEmptyIteratorExpected = errors.New("expected non-empty iterator, got no elements")
+
+func TestSimpleStakerOpsForStakersStorage(t *testing.T) {
+	properties := gopter.NewProperties(nil)
+
+	properties.Property("some current validator ops", prop.ForAll(
+		func(s state.Staker) string {
+			store := newStakersStorageModel()
+
+			// no staker before insertion
+			_, err := store.GetCurrentValidator(s.SubnetID, s.NodeID)
+			if err != database.ErrNotFound {
+				return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err)
+			}
+
+			// it's fine deleting unknown validator
+			store.DeleteCurrentValidator(&s)
+
+			currIT, err := store.GetCurrentStakerIterator()
+			if err != nil {
+				return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err)
+			}
+			if currIT.Next() {
+				return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value())
+			}
+			currIT.Release()
+
+			// staker after insertion
+			store.PutCurrentValidator(&s)
+			retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID)
+			if err != nil {
+				return fmt.Sprintf("expected no error, got %v", err)
+			}
+			if !reflect.DeepEqual(&s, retrievedStaker) {
+				return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker)
+			}
+
+			currIT, err = store.GetCurrentStakerIterator()
+			if err != nil {
+				return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err)
+			}
+			if !currIT.Next() {
+				return errNonEmptyIteratorExpected.Error()
+			}
+			if !reflect.DeepEqual(currIT.Value(), retrievedStaker) {
+				return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker)
+			}
+			currIT.Release()
+
+			// no staker after deletion
+			
store.DeleteCurrentValidator(&s) + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + currIT, err = store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + return "" + }, + stakerGenerator(nil, nil, nil), + )) + + properties.Property("some pending validator ops", prop.ForAll( + func(s state.Staker) string { + store := newStakersStorageModel() + + // no staker before insertion + _, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + // it's fine deleting unknown validator + store.DeletePendingValidator(&s) + + currIT, err := store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + // staker after insertion + store.PutPendingValidator(&s) + retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&s, retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + + currIT, err = store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !currIT.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + currIT.Release() + + // no staker after deletion + store.DeletePendingValidator(&s) + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + currIT, err = store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + return "" + }, + stakerGenerator(nil, nil, nil), + )) + + var ( + valPrio = currentValidator + delPrio = currentDelegator + subnetID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + ) + properties.Property("some current delegators ops", prop.ForAll( + func(val state.Staker, dels []state.Staker) string { + store := newStakersStorageModel() + + // store validator + store.PutCurrentValidator(&val) + + // check validator - version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&val, retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + + // check validator - version 2 + valIt, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !valIt.Next() { + return 
errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + valIt.Release() + + // store delegators + for _, del := range dels { + cpy := del + store.PutCurrentDelegator(&cpy) + } + + // check delegators - version 1 + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + for delIt.Next() { + found := false + for _, del := range dels { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + if !found { + return fmt.Sprintf("found extra delegator %v", delIt.Value()) + } + } + delIt.Release() + + // check delegators - version 2 + for _, del := range dels { + found := false + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + + if !found { + return fmt.Sprintf("missing delegator %v", del) + } + } + + // delege delegators + for _, del := range dels { + cpy := del + store.DeleteCurrentDelegator(&cpy) + + // check deleted delegator is not there anymore + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + + found := false + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + if found { + return fmt.Sprintf("found deleted delegator %v", del) + } + } + + return "" + }, + stakerGenerator(&valPrio, &subnetID, &nodeID), + gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), + )) + + properties.Property("some pending delegators ops", prop.ForAll( + func(val state.Staker, dels []state.Staker) string { + store := newStakersStorageModel() + + // store validator + store.PutCurrentValidator(&val) + + // check validator - version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&val, retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + + // check validator - version 2 + valIt, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !valIt.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + valIt.Release() + + // store delegators + for _, del := range dels { + cpy := del + store.PutPendingDelegator(&cpy) + } + + // check delegators - version 1 + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + for delIt.Next() { + found := false + for _, del := range dels { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + if !found { + return fmt.Sprintf("found extra delegator %v", delIt.Value()) + } + } + delIt.Release() + + // check delegators - version 
2 + for _, del := range dels { + found := false + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + + if !found { + return fmt.Sprintf("missing delegator %v", del) + } + } + + // delege delegators + for _, del := range dels { + cpy := del + store.DeletePendingDelegator(&cpy) + + // check deleted delegator is not there anymore + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + + found := false + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + if found { + return fmt.Sprintf("found deleted delegator %v", del) + } + } + + return "" + }, + stakerGenerator(&valPrio, &subnetID, &nodeID), + gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), + )) + + properties.TestingRun(t) +} + +func TestSimpleStakerOpsForState(t *testing.T) { + properties := gopter.NewProperties(nil) + + properties.Property("some current validator ops", prop.ForAll( + func(s state.Staker) string { + store, err := buildChainState() + if err != nil { + return fmt.Sprintf("unexpected error while creating chain state, err %v", err) + } + + // no staker before insertion + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + // it's fine deleting unknown validator + store.DeleteCurrentValidator(&s) + + currIT, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + // staker after insertion + store.PutCurrentValidator(&s) + retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&s, retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + + currIT, err = store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !currIT.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + currIT.Release() + + // no staker after deletion + store.DeleteCurrentValidator(&s) + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + currIT, err = store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + return "" + }, + stakerGenerator(nil, nil, nil), + )) + + properties.Property("some pending validator ops", prop.ForAll( + func(s state.Staker) string { + store := newStakersStorageModel() + + // 
no staker before insertion + _, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + // it's fine deleting unknown validator + store.DeletePendingValidator(&s) + + currIT, err := store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + // staker after insertion + store.PutPendingValidator(&s) + retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&s, retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + + currIT, err = store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !currIT.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + currIT.Release() + + // no staker after deletion + store.DeletePendingValidator(&s) + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + currIT, err = store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + return "" + }, + stakerGenerator(nil, nil, nil), + )) + + var ( + valPrio = currentValidator + delPrio = currentDelegator + subnetID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + ) + properties.Property("some current delegators ops", prop.ForAll( + func(val state.Staker, dels []state.Staker) string { + store := newStakersStorageModel() + + // store validator + store.PutCurrentValidator(&val) + + // check validator - version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&val, retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + + // check validator - version 2 + valIt, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !valIt.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + valIt.Release() + + // store delegators + for _, del := range dels { + cpy := del + store.PutCurrentDelegator(&cpy) + } + + // check delegators - version 1 + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + for delIt.Next() { + found := false + for _, del := range dels { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + if !found { + return 
fmt.Sprintf("found extra delegator %v", delIt.Value()) + } + } + delIt.Release() + + // check delegators - version 2 + for _, del := range dels { + found := false + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + + if !found { + return fmt.Sprintf("missing delegator %v", del) + } + } + + // delege delegators + for _, del := range dels { + cpy := del + store.DeleteCurrentDelegator(&cpy) + + // check deleted delegator is not there anymore + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + + found := false + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + if found { + return fmt.Sprintf("found deleted delegator %v", del) + } + } + + return "" + }, + stakerGenerator(&valPrio, &subnetID, &nodeID), + gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), + )) + + properties.Property("some pending delegators ops", prop.ForAll( + func(val state.Staker, dels []state.Staker) string { + store := newStakersStorageModel() + + // store validator + store.PutCurrentValidator(&val) + + // check validator - version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&val, retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + + // check validator - version 2 + valIt, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !valIt.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + valIt.Release() + + // store delegators + for _, del := range dels { + cpy := del + store.PutPendingDelegator(&cpy) + } + + // check delegators - version 1 + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + for delIt.Next() { + found := false + for _, del := range dels { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + if !found { + return fmt.Sprintf("found extra delegator %v", delIt.Value()) + } + } + delIt.Release() + + // check delegators - version 2 + for _, del := range dels { + found := false + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + + if !found { + return fmt.Sprintf("missing delegator %v", del) + } + } + + // delege delegators + for _, del := range dels { + cpy := del + store.DeletePendingDelegator(&cpy) + + // check deleted delegator is not there anymore + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in 
pending delegators iterator creation, error %v", err) + } + + found := false + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + if found { + return fmt.Sprintf("found deleted delegator %v", del) + } + } + + return "" + }, + stakerGenerator(&valPrio, &subnetID, &nodeID), + gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), + )) + + properties.TestingRun(t) +} + +// TODO ABENEGIA: do for diffs as well diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/models/stakers_storage_model_test.go deleted file mode 100644 index 9313b671f845..000000000000 --- a/vms/platformvm/state/models/stakers_storage_model_test.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package models - -import ( - "fmt" - "reflect" - "testing" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" - "github.com/leanovate/gopter/prop" -) - -func TestStakersStorageMode(t *testing.T) { - properties := gopter.NewProperties(nil) - - properties.Property("some current validator ops", prop.ForAll( - func(s state.Staker) string { - store := newStakersStorageModel() - - // no staker before insertion - _, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - // it's fine deleting unknown validator - store.DeleteCurrentValidator(&s) - - currIT, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - // staker after insertion - store.PutCurrentValidator(&s) - retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&s, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - - currIT, err = store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !currIT.Next() { - return "expected non-empty iterator, got no elements" - } - if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - currIT.Release() - - // no staker after deletion - store.DeleteCurrentValidator(&s) - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - currIT, err = store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - return "" - }, - stakerGenerator(nil, nil, nil), - )) - - properties.Property("some pending validator ops", prop.ForAll( - func(s state.Staker) string { - store := newStakersStorageModel() - - // no staker before insertion - _, err := 
store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - // it's fine deleting unknown validator - store.DeletePendingValidator(&s) - - currIT, err := store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - // staker after insertion - store.PutPendingValidator(&s) - retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&s, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - - currIT, err = store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !currIT.Next() { - return "expected non-empty iterator, got no elements" - } - if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - currIT.Release() - - // no staker after deletion - store.DeletePendingValidator(&s) - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - currIT, err = store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - return "" - }, - stakerGenerator(nil, nil, nil), - )) - - var ( - valPrio = currentValidator - delPrio = currentDelegator - subnetID = ids.GenerateTestID() - nodeID = ids.GenerateTestNodeID() - ) - properties.Property("some current delegators ops", prop.ForAll( - func(val state.Staker, dels []state.Staker) string { - store := newStakersStorageModel() - - // store validator - store.PutCurrentValidator(&val) - - // check validator - version 1 - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&val, retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - - // check validator - version 2 - valIt, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !valIt.Next() { - return "expected non-empty iterator, got no elements" - } - if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - valIt.Release() - - // store delegators - for _, del := range dels { - cpy := del - store.PutCurrentDelegator(&cpy) - } - - // check delegators - version 1 - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for delIt.Next() { - found := false - for _, del := range dels { - if reflect.DeepEqual(delIt.Value(), &del) { - found = true - break - } - } - if !found { - return 
fmt.Sprintf("found extra delegator %v", delIt.Value()) - } - } - delIt.Release() - - // check delegators - version 2 - for _, del := range dels { - found := false - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for delIt.Next() { - if reflect.DeepEqual(delIt.Value(), &del) { - found = true - break - } - } - delIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } - } - - // delege delegators - for _, del := range dels { - cpy := del - store.DeleteCurrentDelegator(&cpy) - - // check deleted delegator is not there anymore - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - - found := false - for delIt.Next() { - if reflect.DeepEqual(delIt.Value(), &del) { - found = true - break - } - } - delIt.Release() - if found { - return fmt.Sprintf("found deleted delegator %v", del) - } - } - - return "" - }, - stakerGenerator(&valPrio, &subnetID, &nodeID), - gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), - )) - - properties.Property("some pending delegators ops", prop.ForAll( - func(val state.Staker, dels []state.Staker) string { - store := newStakersStorageModel() - - // store validator - store.PutCurrentValidator(&val) - - // check validator - version 1 - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&val, retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - - // check validator - version 2 - valIt, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !valIt.Next() { - return "expected non-empty iterator, got no elements" - } - if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - valIt.Release() - - // store delegators - for _, del := range dels { - cpy := del - store.PutPendingDelegator(&cpy) - } - - // check delegators - version 1 - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - for delIt.Next() { - found := false - for _, del := range dels { - if reflect.DeepEqual(delIt.Value(), &del) { - found = true - break - } - } - if !found { - return fmt.Sprintf("found extra delegator %v", delIt.Value()) - } - } - delIt.Release() - - // check delegators - version 2 - for _, del := range dels { - found := false - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - for delIt.Next() { - if reflect.DeepEqual(delIt.Value(), &del) { - found = true - break - } - } - delIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } - } - - // delege delegators - for _, del := range dels { - cpy := del - store.DeletePendingDelegator(&cpy) - - // check deleted delegator is not there anymore - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected 
failure in pending delegators iterator creation, error %v", err) - } - - found := false - for delIt.Next() { - if reflect.DeepEqual(delIt.Value(), &del) { - found = true - break - } - } - delIt.Release() - if found { - return fmt.Sprintf("found deleted delegator %v", del) - } - } - - return "" - }, - stakerGenerator(&valPrio, &subnetID, &nodeID), - gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), - )) - - properties.TestingRun(t) -} From 245fa589bb144da92f3fc439404f25841d88379b Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 30 Mar 2023 10:58:55 +0200 Subject: [PATCH 005/132] diff property tests + fix --- vms/platformvm/state/models/helpers_test.go | 10 + .../state/models/stakers_ops_test.go | 362 +++++++++++++++++- vms/platformvm/state/stakers.go | 1 + 3 files changed, 372 insertions(+), 1 deletion(-) diff --git a/vms/platformvm/state/models/helpers_test.go b/vms/platformvm/state/models/helpers_test.go index 396d1d4074bb..933f700c3dfb 100644 --- a/vms/platformvm/state/models/helpers_test.go +++ b/vms/platformvm/state/models/helpers_test.go @@ -29,6 +29,8 @@ import ( ) var ( + _ state.Versions = (*versionsHolder)(nil) + xChainID = ids.Empty.Prefix(0) cChainID = ids.Empty.Prefix(1) avaxAssetID = ids.ID{'y', 'e', 'e', 't'} @@ -43,6 +45,14 @@ var ( testNetworkID = 10 // To be used in tests ) +type versionsHolder struct { + baseState state.State +} + +func (h *versionsHolder) GetState(blkID ids.ID) (state.Chain, bool) { + return h.baseState, blkID == h.baseState.GetLastAccepted() +} + func buildChainState() (state.State, error) { baseDBManager := manager.NewMemDB(version.Semantic1_0_0) baseDB := versiondb.New(baseDBManager.Current().Database) diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go index 99ed4a421bb1..8dd3cc7d42ea 100644 --- a/vms/platformvm/state/models/stakers_ops_test.go +++ b/vms/platformvm/state/models/stakers_ops_test.go @@ -722,4 +722,364 @@ func TestSimpleStakerOpsForState(t *testing.T) { properties.TestingRun(t) } -// TODO ABENEGIA: do for diffs as well +func TestSimpleStakerOpsForDiff(t *testing.T) { + properties := gopter.NewProperties(nil) + + properties.Property("some current validator ops", prop.ForAll( + func(s state.Staker) string { + baseState, err := buildChainState() + if err != nil { + return fmt.Sprintf("unexpected error while creating chain base state, err %v", err) + } + + genesisID := baseState.GetLastAccepted() + versions := &versionsHolder{ + baseState: baseState, + } + store, err := state.NewDiff(genesisID, versions) + if err != nil { + return fmt.Sprintf("unexpected error while creating diff, err %v", err) + } + + // no staker before insertion + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + // it's fine deleting unknown validator + store.DeleteCurrentValidator(&s) + + currIT, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + // staker after insertion + store.PutCurrentValidator(&s) + retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&s, retrievedStaker) { + return fmt.Sprintf("wrong 
staker retrieved expected %v, got %v", &s, retrievedStaker) + } + + currIT, err = store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !currIT.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + currIT.Release() + + // no staker after deletion + store.DeleteCurrentValidator(&s) + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + currIT, err = store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + return "" + }, + stakerGenerator(nil, nil, nil), + )) + + properties.Property("some pending validator ops", prop.ForAll( + func(s state.Staker) string { + store := newStakersStorageModel() + + // no staker before insertion + _, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + // it's fine deleting unknown validator + store.DeletePendingValidator(&s) + + currIT, err := store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + // staker after insertion + store.PutPendingValidator(&s) + retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&s, retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + + currIT, err = store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !currIT.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + } + currIT.Release() + + // no staker after deletion + store.DeletePendingValidator(&s) + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } + + currIT, err = store.GetPendingStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + + return "" + }, + stakerGenerator(nil, nil, nil), + )) + + var ( + valPrio = currentValidator + delPrio = currentDelegator + subnetID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + ) + properties.Property("some current delegators ops", prop.ForAll( + func(val state.Staker, dels []state.Staker) string { + store := newStakersStorageModel() + + // store validator + store.PutCurrentValidator(&val) + + // check validator - version 1 + retrievedValidator, err := 
store.GetCurrentValidator(val.SubnetID, val.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&val, retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + + // check validator - version 2 + valIt, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !valIt.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + valIt.Release() + + // store delegators + for _, del := range dels { + cpy := del + store.PutCurrentDelegator(&cpy) + } + + // check delegators - version 1 + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + for delIt.Next() { + found := false + for _, del := range dels { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + if !found { + return fmt.Sprintf("found extra delegator %v", delIt.Value()) + } + } + delIt.Release() + + // check delegators - version 2 + for _, del := range dels { + found := false + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + + if !found { + return fmt.Sprintf("missing delegator %v", del) + } + } + + // delege delegators + for _, del := range dels { + cpy := del + store.DeleteCurrentDelegator(&cpy) + + // check deleted delegator is not there anymore + delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) + } + + found := false + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + if found { + return fmt.Sprintf("found deleted delegator %v", del) + } + } + + return "" + }, + stakerGenerator(&valPrio, &subnetID, &nodeID), + gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), + )) + + properties.Property("some pending delegators ops", prop.ForAll( + func(val state.Staker, dels []state.Staker) string { + store := newStakersStorageModel() + + // store validator + store.PutCurrentValidator(&val) + + // check validator - version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + if err != nil { + return fmt.Sprintf("expected no error, got %v", err) + } + if !reflect.DeepEqual(&val, retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + + // check validator - version 2 + valIt, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if !valIt.Next() { + return errNonEmptyIteratorExpected.Error() + } + if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) + } + valIt.Release() + + // store delegators + for _, del := range dels { + cpy := del + store.PutPendingDelegator(&cpy) + } + + // 
check delegators - version 1 + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + for delIt.Next() { + found := false + for _, del := range dels { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + if !found { + return fmt.Sprintf("found extra delegator %v", delIt.Value()) + } + } + delIt.Release() + + // check delegators - version 2 + for _, del := range dels { + found := false + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + + if !found { + return fmt.Sprintf("missing delegator %v", del) + } + } + + // delege delegators + for _, del := range dels { + cpy := del + store.DeletePendingDelegator(&cpy) + + // check deleted delegator is not there anymore + delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) + if err != nil { + return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) + } + + found := false + for delIt.Next() { + if reflect.DeepEqual(*delIt.Value(), del) { + found = true + break + } + } + delIt.Release() + if found { + return fmt.Sprintf("found deleted delegator %v", del) + } + } + + return "" + }, + stakerGenerator(&valPrio, &subnetID, &nodeID), + gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), + )) + + properties.TestingRun(t) +} diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index 5276ff4f8204..a04cf0ca2d20 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -297,6 +297,7 @@ func (s *diffStakers) PutValidator(staker *Staker) { s.addedStakers = btree.NewG(defaultTreeDegree, (*Staker).Less) } s.addedStakers.ReplaceOrInsert(staker) + delete(s.deletedStakers, staker.TxID) } func (s *diffStakers) DeleteValidator(staker *Staker) { From 2e9fd5f3e776618192583736fc36ca3c84fa484b Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 30 Mar 2023 11:36:36 +0200 Subject: [PATCH 006/132] reduced code duplication in tests --- .../state/models/stakers_ops_test.go | 749 +----------------- 1 file changed, 40 insertions(+), 709 deletions(-) diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go index 8dd3cc7d42ea..35400758c82a 100644 --- a/vms/platformvm/state/models/stakers_ops_test.go +++ b/vms/platformvm/state/models/stakers_ops_test.go @@ -19,726 +19,48 @@ import ( var errNonEmptyIteratorExpected = errors.New("expected non-empty iterator, got no elements") -func TestSimpleStakerOpsForStakersStorage(t *testing.T) { - properties := gopter.NewProperties(nil) - - properties.Property("some current validator ops", prop.ForAll( - func(s state.Staker) string { - store := newStakersStorageModel() - - // no staker before insertion - _, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - // it's fine deleting unknown validator - store.DeleteCurrentValidator(&s) - - currIT, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected 
empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - // staker after insertion - store.PutCurrentValidator(&s) - retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&s, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - - currIT, err = store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !currIT.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - currIT.Release() - - // no staker after deletion - store.DeleteCurrentValidator(&s) - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - currIT, err = store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - return "" - }, - stakerGenerator(nil, nil, nil), - )) - - properties.Property("some pending validator ops", prop.ForAll( - func(s state.Staker) string { - store := newStakersStorageModel() - - // no staker before insertion - _, err := store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - // it's fine deleting unknown validator - store.DeletePendingValidator(&s) - - currIT, err := store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - // staker after insertion - store.PutPendingValidator(&s) - retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&s, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - - currIT, err = store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !currIT.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - currIT.Release() - - // no staker after deletion - store.DeletePendingValidator(&s) - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - currIT, err = store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - return "" - }, - stakerGenerator(nil, nil, nil), - )) - - var ( - valPrio = currentValidator - delPrio = 
currentDelegator - subnetID = ids.GenerateTestID() - nodeID = ids.GenerateTestNodeID() - ) - properties.Property("some current delegators ops", prop.ForAll( - func(val state.Staker, dels []state.Staker) string { - store := newStakersStorageModel() - - // store validator - store.PutCurrentValidator(&val) - - // check validator - version 1 - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&val, retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - - // check validator - version 2 - valIt, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !valIt.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - valIt.Release() - - // store delegators - for _, del := range dels { - cpy := del - store.PutCurrentDelegator(&cpy) - } - - // check delegators - version 1 - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for delIt.Next() { - found := false - for _, del := range dels { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - if !found { - return fmt.Sprintf("found extra delegator %v", delIt.Value()) - } - } - delIt.Release() - - // check delegators - version 2 - for _, del := range dels { - found := false - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } - } - - // delege delegators - for _, del := range dels { - cpy := del - store.DeleteCurrentDelegator(&cpy) - - // check deleted delegator is not there anymore - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - - found := false - for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - if found { - return fmt.Sprintf("found deleted delegator %v", del) - } - } - - return "" - }, - stakerGenerator(&valPrio, &subnetID, &nodeID), - gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), - )) - - properties.Property("some pending delegators ops", prop.ForAll( - func(val state.Staker, dels []state.Staker) string { - store := newStakersStorageModel() - - // store validator - store.PutCurrentValidator(&val) - - // check validator - version 1 - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&val, retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - - // check validator - version 2 - valIt, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error 
%v", err) - } - if !valIt.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - valIt.Release() - - // store delegators - for _, del := range dels { - cpy := del - store.PutPendingDelegator(&cpy) - } - - // check delegators - version 1 - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - for delIt.Next() { - found := false - for _, del := range dels { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - if !found { - return fmt.Sprintf("found extra delegator %v", delIt.Value()) - } - } - delIt.Release() - - // check delegators - version 2 - for _, del := range dels { - found := false - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } - } - - // delege delegators - for _, del := range dels { - cpy := del - store.DeletePendingDelegator(&cpy) - - // check deleted delegator is not there anymore - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - - found := false - for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - if found { - return fmt.Sprintf("found deleted delegator %v", del) - } - } - - return "" - }, - stakerGenerator(&valPrio, &subnetID, &nodeID), - gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), - )) - - properties.TestingRun(t) -} - -func TestSimpleStakerOpsForState(t *testing.T) { - properties := gopter.NewProperties(nil) - - properties.Property("some current validator ops", prop.ForAll( - func(s state.Staker) string { - store, err := buildChainState() - if err != nil { - return fmt.Sprintf("unexpected error while creating chain state, err %v", err) - } - - // no staker before insertion - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - // it's fine deleting unknown validator - store.DeleteCurrentValidator(&s) - - currIT, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - // staker after insertion - store.PutCurrentValidator(&s) - retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&s, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - - currIT, err = store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !currIT.Next() { - return errNonEmptyIteratorExpected.Error() - } - if 
!reflect.DeepEqual(currIT.Value(), retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - currIT.Release() - - // no staker after deletion - store.DeleteCurrentValidator(&s) - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - currIT, err = store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - return "" - }, - stakerGenerator(nil, nil, nil), - )) - - properties.Property("some pending validator ops", prop.ForAll( - func(s state.Staker) string { - store := newStakersStorageModel() - - // no staker before insertion - _, err := store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - // it's fine deleting unknown validator - store.DeletePendingValidator(&s) - - currIT, err := store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - // staker after insertion - store.PutPendingValidator(&s) - retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&s, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - - currIT, err = store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !currIT.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - currIT.Release() - - // no staker after deletion - store.DeletePendingValidator(&s) - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - - currIT, err = store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() - - return "" +func TestSimpleStakersOperations(t *testing.T) { + storeCreators := map[string]func() (state.Stakers, error){ + "base state": func() (state.Stakers, error) { + return buildChainState() }, - stakerGenerator(nil, nil, nil), - )) - - var ( - valPrio = currentValidator - delPrio = currentDelegator - subnetID = ids.GenerateTestID() - nodeID = ids.GenerateTestNodeID() - ) - properties.Property("some current delegators ops", prop.ForAll( - func(val state.Staker, dels []state.Staker) string { - store := newStakersStorageModel() - - // store validator - store.PutCurrentValidator(&val) - - // check validator - version 1 - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - 
} - if !reflect.DeepEqual(&val, retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - - // check validator - version 2 - valIt, err := store.GetCurrentStakerIterator() + "diff": func() (state.Stakers, error) { + baseState, err := buildChainState() if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + return nil, fmt.Errorf("unexpected error while creating chain base state, err %v", err) } - if !valIt.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - valIt.Release() - // store delegators - for _, del := range dels { - cpy := del - store.PutCurrentDelegator(&cpy) + genesisID := baseState.GetLastAccepted() + versions := &versionsHolder{ + baseState: baseState, } - - // check delegators - version 1 - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) + store, err := state.NewDiff(genesisID, versions) if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for delIt.Next() { - found := false - for _, del := range dels { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - if !found { - return fmt.Sprintf("found extra delegator %v", delIt.Value()) - } - } - delIt.Release() - - // check delegators - version 2 - for _, del := range dels { - found := false - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } - } - - // delege delegators - for _, del := range dels { - cpy := del - store.DeleteCurrentDelegator(&cpy) - - // check deleted delegator is not there anymore - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - - found := false - for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - if found { - return fmt.Sprintf("found deleted delegator %v", del) - } + return nil, fmt.Errorf("unexpected error while creating diff, err %v", err) } - - return "" + return store, nil }, - stakerGenerator(&valPrio, &subnetID, &nodeID), - gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), - )) - - properties.Property("some pending delegators ops", prop.ForAll( - func(val state.Staker, dels []state.Staker) string { - store := newStakersStorageModel() - - // store validator - store.PutCurrentValidator(&val) - - // check validator - version 1 - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(&val, retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - - // check validator - version 2 - valIt, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !valIt.Next() { - return errNonEmptyIteratorExpected.Error() - } - if 
!reflect.DeepEqual(valIt.Value(), retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - valIt.Release() - - // store delegators - for _, del := range dels { - cpy := del - store.PutPendingDelegator(&cpy) - } - - // check delegators - version 1 - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - for delIt.Next() { - found := false - for _, del := range dels { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - if !found { - return fmt.Sprintf("found extra delegator %v", delIt.Value()) - } - } - delIt.Release() - - // check delegators - version 2 - for _, del := range dels { - found := false - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } - } - - // delege delegators - for _, del := range dels { - cpy := del - store.DeletePendingDelegator(&cpy) - - // check deleted delegator is not there anymore - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - - found := false - for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - if found { - return fmt.Sprintf("found deleted delegator %v", del) - } - } - - return "" + "storage model": func() (state.Stakers, error) { //nolint:golint,unparam + return newStakersStorageModel(), nil }, - stakerGenerator(&valPrio, &subnetID, &nodeID), - gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), - )) - - properties.TestingRun(t) + } + + for storeType, storeCreatorF := range storeCreators { + t.Run(storeType, func(t *testing.T) { + properties := simpleStakerStateProperties(storeCreatorF) + properties.TestingRun(t) + }) + } } -func TestSimpleStakerOpsForDiff(t *testing.T) { +func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *gopter.Properties { properties := gopter.NewProperties(nil) properties.Property("some current validator ops", prop.ForAll( func(s state.Staker) string { - baseState, err := buildChainState() - if err != nil { - return fmt.Sprintf("unexpected error while creating chain base state, err %v", err) - } - - genesisID := baseState.GetLastAccepted() - versions := &versionsHolder{ - baseState: baseState, - } - store, err := state.NewDiff(genesisID, versions) + store, err := storeCreatorF() if err != nil { - return fmt.Sprintf("unexpected error while creating diff, err %v", err) + return fmt.Sprintf("unexpected error while creating staker store, err %v", err) } // no staker before insertion @@ -804,10 +126,13 @@ func TestSimpleStakerOpsForDiff(t *testing.T) { properties.Property("some pending validator ops", prop.ForAll( func(s state.Staker) string { - store := newStakersStorageModel() + store, err := storeCreatorF() + if err != nil { + return fmt.Sprintf("unexpected error while creating staker store, err %v", err) + } // no staker before insertion - _, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) if err 
!= database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } @@ -875,7 +200,10 @@ func TestSimpleStakerOpsForDiff(t *testing.T) { ) properties.Property("some current delegators ops", prop.ForAll( func(val state.Staker, dels []state.Staker) string { - store := newStakersStorageModel() + store, err := storeCreatorF() + if err != nil { + return fmt.Sprintf("unexpected error while creating staker store, err %v", err) + } // store validator store.PutCurrentValidator(&val) @@ -979,7 +307,10 @@ func TestSimpleStakerOpsForDiff(t *testing.T) { properties.Property("some pending delegators ops", prop.ForAll( func(val state.Staker, dels []state.Staker) string { - store := newStakersStorageModel() + store, err := storeCreatorF() + if err != nil { + return fmt.Sprintf("unexpected error while creating staker store, err %v", err) + } // store validator store.PutCurrentValidator(&val) @@ -1081,5 +412,5 @@ func TestSimpleStakerOpsForDiff(t *testing.T) { gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)), )) - properties.TestingRun(t) + return properties } From cbd246bac99d9b89747c9a86b2825357c8e4bb9a Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 30 Mar 2023 12:07:14 +0200 Subject: [PATCH 007/132] fix insert after delete delegator in diff --- vms/platformvm/state/models/stakers_ops_test.go | 10 ++++++++++ vms/platformvm/state/stakers.go | 2 ++ 2 files changed, 12 insertions(+) diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go index 35400758c82a..e96ce57b7f06 100644 --- a/vms/platformvm/state/models/stakers_ops_test.go +++ b/vms/platformvm/state/models/stakers_ops_test.go @@ -233,6 +233,11 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g // store delegators for _, del := range dels { cpy := del + + // it's fine deleting unknown delegator + store.DeleteCurrentDelegator(&cpy) + + // finally store the delegator store.PutCurrentDelegator(&cpy) } @@ -340,6 +345,11 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g // store delegators for _, del := range dels { cpy := del + + // it's fine deleting unknown delegator + store.DeletePendingDelegator(&cpy) + + // finally store the delegator store.PutPendingDelegator(&cpy) } diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index a04cf0ca2d20..2113979cb42f 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -349,11 +349,13 @@ func (s *diffStakers) PutDelegator(staker *Staker) { validatorDiff.addedDelegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validatorDiff.addedDelegators.ReplaceOrInsert(staker) + delete(validatorDiff.deletedDelegators, staker.TxID) if s.addedStakers == nil { s.addedStakers = btree.NewG(defaultTreeDegree, (*Staker).Less) } s.addedStakers.ReplaceOrInsert(staker) + delete(s.deletedStakers, staker.TxID) } func (s *diffStakers) DeleteDelegator(staker *Staker) { From e7691da5bbfb5d16e5a85bfde357e1d82139d58c Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 30 Mar 2023 12:17:39 +0200 Subject: [PATCH 008/132] extended test coverage --- .../state/models/stakers_ops_test.go | 70 +++++++++++++++---- 1 file changed, 55 insertions(+), 15 deletions(-) diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go index e96ce57b7f06..f41ef9593486 100644 --- a/vms/platformvm/state/models/stakers_ops_test.go +++ 
b/vms/platformvm/state/models/stakers_ops_test.go
@@ -241,7 +241,27 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 				store.PutCurrentDelegator(&cpy)
 			}
 
-			// check delegators - version 1
+			// check no missing delegators by subnetID, nodeID
+			for _, del := range dels {
+				found := false
+				delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID)
+				if err != nil {
+					return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err)
+				}
+				for delIt.Next() {
+					if reflect.DeepEqual(*delIt.Value(), del) {
+						found = true
+						break
+					}
+				}
+				delIt.Release()
+
+				if !found {
+					return fmt.Sprintf("missing delegator %v", del)
+				}
+			}
+
+			// check no extra delegators by subnetID, nodeID
 			delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID)
 			if err != nil {
 				return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err)
@@ -260,27 +280,27 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 			}
 			delIt.Release()
 
-			// check delegators - version 2
+			// check no missing delegators in the whole staker set
 			for _, del := range dels {
 				found := false
-				delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID)
+				fullDelIt, err := store.GetCurrentStakerIterator()
 				if err != nil {
 					return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err)
 				}
-				for delIt.Next() {
-					if reflect.DeepEqual(*delIt.Value(), del) {
+				for fullDelIt.Next() {
+					if reflect.DeepEqual(*fullDelIt.Value(), del) {
 						found = true
 						break
 					}
 				}
-				delIt.Release()
+				fullDelIt.Release()
 
 				if !found {
 					return fmt.Sprintf("missing delegator %v", del)
 				}
 			}
 
-			// delege delegators
+			// delete delegators
 			for _, del := range dels {
 				cpy := del
 				store.DeleteCurrentDelegator(&cpy)
@@ -353,7 +373,27 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 				store.PutPendingDelegator(&cpy)
 			}
 
-			// check delegators - version 1
+			// check no missing delegators by subnetID, nodeID
+			for _, del := range dels {
+				found := false
+				delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID)
+				if err != nil {
+					return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err)
+				}
+				for delIt.Next() {
+					if reflect.DeepEqual(*delIt.Value(), del) {
+						found = true
+						break
+					}
+				}
+				delIt.Release()
+
+				if !found {
+					return fmt.Sprintf("missing delegator %v", del)
+				}
+			}
+
+			// check no extra delegators by subnetID, nodeID
 			delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID)
 			if err != nil {
 				return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err)
@@ -372,27 +412,27 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 			}
 			delIt.Release()
 
-			// check delegators - version 2
+			// check no missing delegators in the whole staker set
 			for _, del := range dels {
 				found := false
-				delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID)
+				fullDelIt, err := store.GetPendingStakerIterator()
 				if err != nil {
-					return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err)
+					return fmt.Sprintf("unexpected failure in pending staker iterator creation, error %v", err)
 				}
-				for delIt.Next() {
-					if reflect.DeepEqual(*delIt.Value(), del) {
+				for fullDelIt.Next() {
+					if reflect.DeepEqual(*fullDelIt.Value(), del) {
 						found = true
 						break
 					}
 				}
-				delIt.Release()
+				fullDelIt.Release()
 
 				if !found {
 					return fmt.Sprintf("missing delegator %v", del)
 				}
 			}
 
-			// delege delegators
+			// delete delegators
 			for _, del := range dels {
 				cpy := del
 				store.DeletePendingDelegator(&cpy)

From 94ecc46ccf506706f6641d8cdce28bb8fa6b833f Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Thu, 30 Mar 2023 13:16:22 +0200
Subject: [PATCH 009/132] nit

---
 .../state/models/stakers_generator_test.go | 23 +++++++++----------
 .../state/models/stakers_ops_test.go       | 14 +++++------
 2 files changed, 17 insertions(+), 20 deletions(-)

diff --git a/vms/platformvm/state/models/stakers_generator_test.go b/vms/platformvm/state/models/stakers_generator_test.go
index 7e64c158371b..20dec6a06404 100644
--- a/vms/platformvm/state/models/stakers_generator_test.go
+++ b/vms/platformvm/state/models/stakers_generator_test.go
@@ -20,7 +20,7 @@ import (
 	"github.com/leanovate/gopter/prop"
 )
 
-func stakerGenerator(prio *priorityType, subnet *ids.ID, nodeID *ids.NodeID) gopter.Gen {
+func stakerGenerator(prio priorityType, subnet *ids.ID, nodeID *ids.NodeID) gopter.Gen {
 	return genStakerTimeData(prio).FlatMap(
 		func(v interface{}) gopter.Gen {
 			macro := v.(stakerTimeData)
@@ -62,7 +62,7 @@ func TestGeneratedStakersValidity(t *testing.T) {
 			}
 			return ""
 		},
-		stakerGenerator(nil, nil, nil),
+		stakerGenerator(anyPriority, nil, nil),
 	))
 
 	properties.Property("NextTime coherent with priority", prop.ForAll(
@@ -95,7 +95,7 @@ func TestGeneratedStakersValidity(t *testing.T) {
 				return fmt.Sprintf("priority %v unhandled in test", p)
 			}
 		},
-		stakerGenerator(nil, nil, nil),
+		stakerGenerator(anyPriority, nil, nil),
 	))
 	subnetID := ids.GenerateTestID()
 	nodeID := ids.GenerateTestNodeID()
@@ -112,7 +112,7 @@ func TestGeneratedStakersValidity(t *testing.T) {
 			}
 			return ""
 		},
-		stakerGenerator(nil, &subnetID, &nodeID),
+		stakerGenerator(anyPriority, &subnetID, &nodeID),
 	))
 
 	properties.TestingRun(t)
@@ -130,7 +130,7 @@ type stakerTimeData struct {
 	NextTime time.Time
 }
 
-func genStakerTimeData(prio *priorityType) gopter.Gen {
+func genStakerTimeData(prio priorityType) gopter.Gen {
 	return genStakerMicroData(prio).FlatMap(
 		func(v interface{}) gopter.Gen {
 			micro := v.(stakerMicroData)
@@ -174,7 +174,7 @@ type stakerMicroData struct {
 }
 
 // genStakerMicroData is the helper to generate stakerMicroData
-func genStakerMicroData(prio *priorityType) gopter.Gen {
+func genStakerMicroData(prio priorityType) gopter.Gen {
 	return gen.Struct(reflect.TypeOf(&stakerMicroData{}), map[string]gopter.Gen{
 		"StartTime": gen.Time(),
 		"Duration":  gen.Int64Range(1, 365*24),
@@ -185,14 +185,16 @@
 type priorityType uint8
 
 const (
-	currentValidator priorityType = iota + 1
+	anyPriority priorityType = iota
+	currentValidator
 	currentDelegator
 	pendingValidator
 	pendingDelegator
 )
 
-func genPriority(p *priorityType) gopter.Gen {
-	if p == nil {
+func genPriority(p priorityType) gopter.Gen {
+	switch p {
+	case anyPriority:
 		return gen.OneConstOf(
 			txs.PrimaryNetworkDelegatorApricotPendingPriority,
 			txs.PrimaryNetworkValidatorPendingPriority,
@@ -206,9 +208,6 @@ func genPriority(p *priorityType) gopter.Gen {
 			txs.PrimaryNetworkDelegatorCurrentPriority,
 			txs.PrimaryNetworkValidatorCurrentPriority,
 		)
-	}
-
-	switch *p {
 	case currentValidator:
 		return gen.OneConstOf(
 			txs.SubnetPermissionedValidatorCurrentPriority,
diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go
index f41ef9593486..afbb345210d2 100644
--- a/vms/platformvm/state/models/stakers_ops_test.go
+++ b/vms/platformvm/state/models/stakers_ops_test.go
@@ -121,7 +121,7 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 
 			return ""
 		},
-		stakerGenerator(nil, nil, nil),
+		stakerGenerator(anyPriority, nil, nil),
 	))
 
 	properties.Property("some pending validator ops", prop.ForAll(
@@ -189,12 +189,10 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 
 			return ""
 		},
-		stakerGenerator(nil, nil, nil),
+		stakerGenerator(anyPriority, nil, nil),
 	))
 
 	var (
-		valPrio  = currentValidator
-		delPrio  = currentDelegator
 		subnetID = ids.GenerateTestID()
 		nodeID   = ids.GenerateTestNodeID()
 	)
@@ -326,8 +324,8 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 
 			return ""
 		},
-		stakerGenerator(&valPrio, &subnetID, &nodeID),
-		gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)),
+		stakerGenerator(currentValidator, &subnetID, &nodeID),
+		gen.SliceOfN(20, stakerGenerator(currentDelegator, &subnetID, &nodeID)),
 	))
 
 	properties.Property("some pending delegators ops", prop.ForAll(
@@ -458,8 +456,8 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 			return ""
 		},
-		stakerGenerator(&valPrio, &subnetID, &nodeID),
-		gen.SliceOfN(20, stakerGenerator(&delPrio, &subnetID, &nodeID)),
+		stakerGenerator(currentValidator, &subnetID, &nodeID),
+		gen.SliceOfN(20, stakerGenerator(currentDelegator, &subnetID, &nodeID)),
 	))
 
 	return properties
 }

From cf91599a52d14e3869a640e5ecd4b890f7275803 Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Thu, 30 Mar 2023 17:50:33 +0200
Subject: [PATCH 010/132] nit

---
 vms/platformvm/state/models/stakers_ops_test.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go
index afbb345210d2..d34fd57bde61 100644
--- a/vms/platformvm/state/models/stakers_ops_test.go
+++ b/vms/platformvm/state/models/stakers_ops_test.go
@@ -325,6 +325,9 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g
 			return ""
 		},
 		stakerGenerator(currentValidator, &subnetID, &nodeID),
+
+		// TODO ABENEGIA: make sure txIDs are unique in slice.
+		// They are unlikely to be equal, but still should be fixed.
gen.SliceOfN(20, stakerGenerator(currentDelegator, &subnetID, &nodeID)), )) From 382148dbcf6bc789fedc3c1829d8e3605c9989b6 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 30 Mar 2023 18:18:05 +0200 Subject: [PATCH 011/132] cleanup --- .../state/models/stakers_generator_test.go | 2 +- .../state/models/stakers_ops_test.go | 26 ++++++++++++++++--- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/models/stakers_generator_test.go b/vms/platformvm/state/models/stakers_generator_test.go index 20dec6a06404..306433dca829 100644 --- a/vms/platformvm/state/models/stakers_generator_test.go +++ b/vms/platformvm/state/models/stakers_generator_test.go @@ -100,7 +100,7 @@ func TestGeneratedStakersValidity(t *testing.T) { subnetID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() - properties.Property("EndTime never before StartTime", prop.ForAll( + properties.Property("subnetID and nodeID set as specified", prop.ForAll( func(s state.Staker) string { if s.SubnetID != subnetID { return fmt.Sprintf("unexpected subnetID, expected %v, got %v", diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go index d34fd57bde61..5b7e5fc68e1f 100644 --- a/vms/platformvm/state/models/stakers_ops_test.go +++ b/vms/platformvm/state/models/stakers_ops_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/leanovate/gopter" "github.com/leanovate/gopter/gen" @@ -328,7 +329,17 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g // TODO ABENEGIA: make sure txIDs are unique in slice. // They are unlikely to be equal, but still should be fixed. - gen.SliceOfN(20, stakerGenerator(currentDelegator, &subnetID, &nodeID)), + gen.SliceOfN(10, stakerGenerator(currentDelegator, &subnetID, &nodeID)). + SuchThat(func(v interface{}) bool { + stakersList := v.([]state.Staker) + uniqueTxIDs := set.NewSet[ids.ID](len(stakersList)) + for _, staker := range stakersList { + uniqueTxIDs.Add(staker.TxID) + } + + // make sure TxIDs are unique, at least among delegators + return len(stakersList) == uniqueTxIDs.Len() + }), )) properties.Property("some pending delegators ops", prop.ForAll( @@ -460,10 +471,17 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g return "" }, stakerGenerator(currentValidator, &subnetID, &nodeID), + gen.SliceOfN(10, stakerGenerator(pendingDelegator, &subnetID, &nodeID)). + SuchThat(func(v interface{}) bool { + stakersList := v.([]state.Staker) + uniqueTxIDs := set.NewSet[ids.ID](len(stakersList)) + for _, staker := range stakersList { + uniqueTxIDs.Add(staker.TxID) + } - // TODO ABENEGIA: make sure txIDs are unique in slice. - // They are unlikely to be equal, but still should be fixed. 
-		gen.SliceOfN(20, stakerGenerator(currentDelegator, &subnetID, &nodeID)),
+
+				// make sure TxIDs are unique, at least among delegators
+				return len(stakersList) == uniqueTxIDs.Len()
+			}),
 	))
 
 	return properties

From 8c119d5e6bb697ea57fff505a2f45cd0a1d4f3f3 Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Thu, 30 Mar 2023 18:24:20 +0200
Subject: [PATCH 012/132] wip: stakers storage stateful testing

---
 .../models/stakers_storage_model_test.go      | 236 ++++++++++++++++++
 1 file changed, 236 insertions(+)
 create mode 100644 vms/platformvm/state/models/stakers_storage_model_test.go

diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/models/stakers_storage_model_test.go
new file mode 100644
index 000000000000..cf1bbe7178b2
--- /dev/null
+++ b/vms/platformvm/state/models/stakers_storage_model_test.go
@@ -0,0 +1,236 @@
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package models
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/vms/platformvm/state"
+	"github.com/leanovate/gopter"
+	"github.com/leanovate/gopter/commands"
+	"github.com/leanovate/gopter/gen"
+)
+
+func TestStateAndDiffComparisonToStorageModel(t *testing.T) {
+	properties := gopter.NewProperties(nil)
+	properties.Property("state comparison to storage model", commands.Prop(stakersCommands))
+	properties.TestingRun(t)
+}
+
+type sysUnderTest struct {
+	blkIDList         []ids.ID
+	blkIDToChainState map[ids.ID]state.Chain
+}
+
+// stakersCommands creates/destroy the system under test and generates
+// commands and initial states (stakersStorageModel)
+var stakersCommands = &commands.ProtoCommands{
+	NewSystemUnderTestFunc: func(initialState commands.State) commands.SystemUnderTest {
+		model := initialState.(*stakersStorageModel)
+		baseState, err := buildChainState()
+		if err != nil {
+			panic(err)
+		}
+
+		// fill up baseState with the model's initial content
+		for _, staker := range model.currentValidators {
+			baseState.PutCurrentValidator(staker)
+		}
+		for _, delegators := range model.currentDelegators {
+			for _, staker := range delegators {
+				baseState.PutCurrentDelegator(staker)
+			}
+		}
+		for _, staker := range model.pendingValidators {
+			baseState.PutPendingValidator(staker)
+		}
+		for _, delegators := range model.pendingDelegators {
+			for _, staker := range delegators {
+				baseState.PutPendingDelegator(staker)
+			}
+		}
+
+		baseBlkID := baseState.GetLastAccepted()
+		sys := &sysUnderTest{
+			blkIDList: []ids.ID{baseBlkID},
+			blkIDToChainState: map[ids.ID]state.Chain{
+				baseBlkID: baseState,
+			},
+		}
+		return sys
+	},
+	DestroySystemUnderTestFunc: func(sut commands.SystemUnderTest) {
+		// retrieve base state and close it
+		sys := sut.(*sysUnderTest)
+		baseState := sys.blkIDToChainState[sys.blkIDList[0]]
+		err := baseState.(state.State).Close()
+		if err != nil {
+			panic(err)
+		}
+	},
+	InitialStateGen: gen.Const(newStakersStorageModel()), // TODO ABENEGIA: consider adding initial state
+	InitialPreConditionFunc: func(state commands.State) bool {
+		return true // nothing to do for now
+	},
+	GenCommandFunc: func(state commands.State) gopter.Gen {
+		return gen.OneGenOf(
+			genPutCurrentValidatorCommand,
+			genDeleteCurrentValidatorCommand,
+			// genPutCurrentDelegatorCommand,
+			// genDeleteCurrentDelegatorCommand,

+			// genApplyBottomDiffCommand,
+			// genAddTopDiffCommand,
+			// genCommitBottomStateCommand,
+		)
+	},
+}
+
+type putCurrentValidatorCommand state.Staker
+
+func (v
*putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { + staker := (*state.Staker)(v) + sys := sut.(*sysUnderTest) + topDiffID := sys.blkIDList[len(sys.blkIDList)-1] + topDiff := sys.blkIDToChainState[topDiffID] + topDiff.PutCurrentValidator(staker) + return sys +} + +func (v *putCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { + staker := (*state.Staker)(v) + cmdState.(*stakersStorageModel).PutCurrentValidator(staker) + return cmdState +} + +func (*putCurrentValidatorCommand) PreCondition(commands.State) bool { + // We allow inserting the same validator twice + return true +} + +func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + model := cmdState.(*stakersStorageModel) + sys := res.(*sysUnderTest) + + if checkSystemAndModelContent(model, *sys) { + return &gopter.PropResult{Status: gopter.PropTrue} + } + + return &gopter.PropResult{Status: gopter.PropFalse} +} + +func (v *putCurrentValidatorCommand) String() string { + return fmt.Sprintf("PutCurrentValidator(subnetID: %s, nodeID: %s, txID: %s)", v.SubnetID, v.NodeID, v.TxID) +} + +// We want to have a generator for put commands for arbitrary int values. +// In this case the command is actually shrinkable, e.g. if the property fails +// by putting a 1000, it might already fail for a 500 as well ... +var genPutCurrentValidatorCommand = stakerGenerator(anyPriority, nil, nil).Map( + func(staker state.Staker) commands.Command { + cmd := (*putCurrentValidatorCommand)(&staker) + return cmd + }, +).WithShrinker( + func(v interface{}) gopter.Shrink { + return gen.IntShrinker(v.(putCurrentValidatorCommand)).Map(func(staker state.Staker) *putCurrentValidatorCommand { + cmd := (*putCurrentValidatorCommand)(&staker) + return cmd + }) + }, +) + +type deleteCurrentValidatorCommand state.Staker + +func (v *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { + staker := (*state.Staker)(v) + sys := sut.(*sysUnderTest) + topDiffID := sys.blkIDList[len(sys.blkIDList)-1] + topDiff := sys.blkIDToChainState[topDiffID] + topDiff.DeleteCurrentValidator(staker) + return sys // returns sys to allow comparison with state in PostCondition +} + +func (v *deleteCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { + staker := (*state.Staker)(v) + cmdState.(*stakersStorageModel).DeleteCurrentValidator(staker) + return cmdState +} + +func (*deleteCurrentValidatorCommand) PreCondition(commands.State) bool { + // Don't even require staker to be inserted before being deleted + return true +} + +func (*deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + model := cmdState.(*stakersStorageModel) + sys := res.(*sysUnderTest) + + if checkSystemAndModelContent(model, *sys) { + return &gopter.PropResult{Status: gopter.PropTrue} + } + + return &gopter.PropResult{Status: gopter.PropFalse} +} + +func (v *deleteCurrentValidatorCommand) String() string { + return fmt.Sprintf("DeleteCurrentValidator(subnetID: %s, nodeID: %s, txID: %s)", v.SubnetID, v.NodeID, v.TxID) +} + +// We want to have a generator for put commands for arbitrary int values. +// In this case the command is actually shrinkable, e.g. if the property fails +// by putting a 1000, it might already fail for a 500 as well ... 
+var genDeleteCurrentValidatorCommand = stakerGenerator(anyPriority, nil, nil).Map( + func(staker state.Staker) commands.Command { + cmd := (*deleteCurrentValidatorCommand)(&staker) + return cmd + }, +).WithShrinker( + func(v interface{}) gopter.Shrink { + return gen.IntShrinker(v.(deleteCurrentValidatorCommand)).Map(func(staker state.Staker) *deleteCurrentValidatorCommand { + cmd := (*deleteCurrentValidatorCommand)(&staker) + return cmd + }) + }, +) + +func checkSystemAndModelContent(model *stakersStorageModel, sys sysUnderTest) bool { + // top view content must always match model content + topDiffID := sys.blkIDList[len(sys.blkIDList)-1] + topDiff := sys.blkIDToChainState[topDiffID] + + modelIt, err := model.GetCurrentStakerIterator() + if err != nil { + return false + } + sysIt, err := topDiff.GetCurrentStakerIterator() + if err != nil { + return false + } + + for { + modelNext := modelIt.Next() + sysNext := sysIt.Next() + if modelNext != sysNext { + return false + } + if !sysNext { + break // done with both model and sys iterations + } + + modelStaker := modelIt.Value() + sysStaker := sysIt.Value() + + if modelStaker == nil || sysStaker == nil || !reflect.DeepEqual(modelStaker, sysStaker) { + return false + } + } + + modelIt.Release() + sysIt.Release() + return true +} From 0e48267078e54d9986e1e5d2a0cd01ccb8a6d9ac Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 30 Mar 2023 22:36:56 +0200 Subject: [PATCH 013/132] wip: more stakers storage stateful testing --- .../models/stakers_storage_model_test.go | 137 ++++++++++++------ 1 file changed, 96 insertions(+), 41 deletions(-) diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/models/stakers_storage_model_test.go index cf1bbe7178b2..dfa0db8d6a63 100644 --- a/vms/platformvm/state/models/stakers_storage_model_test.go +++ b/vms/platformvm/state/models/stakers_storage_model_test.go @@ -6,6 +6,7 @@ package models import ( "fmt" "reflect" + "sync/atomic" "testing" "github.com/ava-labs/avalanchego/ids" @@ -15,6 +16,13 @@ import ( "github.com/leanovate/gopter/gen" ) +var ( + _ state.Versions = (*sysUnderTest)(nil) + _ commands.Command = (*putCurrentValidatorCommand)(nil) + _ commands.Command = (*deleteCurrentValidatorCommand)(nil) + _ commands.Command = (*addTopDiffCommand)(nil) +) + func TestStateAndDiffComparisonToStorageModel(t *testing.T) { properties := gopter.NewProperties(nil) properties.Property("state comparison to storage model", commands.Prop(stakersCommands)) @@ -22,10 +30,40 @@ func TestStateAndDiffComparisonToStorageModel(t *testing.T) { } type sysUnderTest struct { - blkIDList []ids.ID + baseState state.State + blkIDsByHeight []ids.ID blkIDToChainState map[ids.ID]state.Chain } +func newSysUnderTest(baseState state.State) *sysUnderTest { + baseBlkID := baseState.GetLastAccepted() + sys := &sysUnderTest{ + baseState: baseState, + blkIDToChainState: map[ids.ID]state.Chain{}, + blkIDsByHeight: []ids.ID{baseBlkID}, + } + return sys +} + +func (s *sysUnderTest) GetState(blkID ids.ID) (state.Chain, bool) { + if state, found := s.blkIDToChainState[blkID]; found { + return state, found + } + return s.baseState, blkID == s.baseState.GetLastAccepted() +} + +func (s *sysUnderTest) addDiffOnTop() { + seed := uint64(len(s.blkIDsByHeight)) + newTopBlkID := ids.Empty.Prefix(atomic.AddUint64(&seed, 1)) + topBlkID := s.blkIDsByHeight[len(s.blkIDsByHeight)-1] + newTopDiff, err := state.NewDiff(topBlkID, s) + if err != nil { + panic(err) + } + s.blkIDsByHeight = append(s.blkIDsByHeight, newTopBlkID) 
+ s.blkIDToChainState[newTopBlkID] = newTopDiff +} + // stakersCommands creates/destroy the system under test and generates // commands and initial states (stakersStorageModel) var stakersCommands = &commands.ProtoCommands{ @@ -54,25 +92,22 @@ var stakersCommands = &commands.ProtoCommands{ } } - baseBlkID := baseState.GetLastAccepted() - sys := &sysUnderTest{ - blkIDList: []ids.ID{baseBlkID}, - blkIDToChainState: map[ids.ID]state.Chain{ - baseBlkID: baseState, - }, - } - return sys + return newSysUnderTest(baseState) }, DestroySystemUnderTestFunc: func(sut commands.SystemUnderTest) { // retrieve base state and close it sys := sut.(*sysUnderTest) - baseState := sys.blkIDToChainState[sys.blkIDList[0]] - err := baseState.(state.State).Close() + err := sys.baseState.Close() if err != nil { panic(err) } }, - InitialStateGen: gen.Const(newStakersStorageModel()), // TODO ABENEGIA: consider adding initial state + // TODO ABENEGIA: using gen.Const(newStakersStorageModel()) would not recreated model + // among calls. Hence just use a dummy generated with sole purpose of recreating model + InitialStateGen: gen.IntRange(1, 2).Map(func(int) *stakersStorageModel { + return newStakersStorageModel() + }), + InitialPreConditionFunc: func(state commands.State) bool { return true // nothing to do for now }, @@ -84,19 +119,20 @@ var stakersCommands = &commands.ProtoCommands{ // genDeleteCurrentDelegatorCommand, // genApplyBottomDiffCommand, - // genAddTopDiffCommand, + genAddTopDiffCommand, // genCommitBottomStateCommand, ) }, } +// PutCurrentValidator section type putCurrentValidatorCommand state.Staker func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { staker := (*state.Staker)(v) sys := sut.(*sysUnderTest) - topDiffID := sys.blkIDList[len(sys.blkIDList)-1] - topDiff := sys.blkIDToChainState[topDiffID] + topBlkID := sys.blkIDsByHeight[len(sys.blkIDsByHeight)-1] + topDiff, _ := sys.GetState(topBlkID) topDiff.PutCurrentValidator(staker) return sys } @@ -116,7 +152,7 @@ func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res co model := cmdState.(*stakersStorageModel) sys := res.(*sysUnderTest) - if checkSystemAndModelContent(model, *sys) { + if checkSystemAndModelContent(model, sys) { return &gopter.PropResult{Status: gopter.PropTrue} } @@ -127,30 +163,21 @@ func (v *putCurrentValidatorCommand) String() string { return fmt.Sprintf("PutCurrentValidator(subnetID: %s, nodeID: %s, txID: %s)", v.SubnetID, v.NodeID, v.TxID) } -// We want to have a generator for put commands for arbitrary int values. -// In this case the command is actually shrinkable, e.g. if the property fails -// by putting a 1000, it might already fail for a 500 as well ... 
var genPutCurrentValidatorCommand = stakerGenerator(anyPriority, nil, nil).Map( func(staker state.Staker) commands.Command { cmd := (*putCurrentValidatorCommand)(&staker) return cmd }, -).WithShrinker( - func(v interface{}) gopter.Shrink { - return gen.IntShrinker(v.(putCurrentValidatorCommand)).Map(func(staker state.Staker) *putCurrentValidatorCommand { - cmd := (*putCurrentValidatorCommand)(&staker) - return cmd - }) - }, ) +// DeleteCurrentValidator section type deleteCurrentValidatorCommand state.Staker func (v *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { staker := (*state.Staker)(v) sys := sut.(*sysUnderTest) - topDiffID := sys.blkIDList[len(sys.blkIDList)-1] - topDiff := sys.blkIDToChainState[topDiffID] + topBlkID := sys.blkIDsByHeight[len(sys.blkIDsByHeight)-1] + topDiff, _ := sys.GetState(topBlkID) topDiff.DeleteCurrentValidator(staker) return sys // returns sys to allow comparison with state in PostCondition } @@ -170,7 +197,7 @@ func (*deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, res model := cmdState.(*stakersStorageModel) sys := res.(*sysUnderTest) - if checkSystemAndModelContent(model, *sys) { + if checkSystemAndModelContent(model, sys) { return &gopter.PropResult{Status: gopter.PropTrue} } @@ -181,27 +208,55 @@ func (v *deleteCurrentValidatorCommand) String() string { return fmt.Sprintf("DeleteCurrentValidator(subnetID: %s, nodeID: %s, txID: %s)", v.SubnetID, v.NodeID, v.TxID) } -// We want to have a generator for put commands for arbitrary int values. -// In this case the command is actually shrinkable, e.g. if the property fails -// by putting a 1000, it might already fail for a 500 as well ... var genDeleteCurrentValidatorCommand = stakerGenerator(anyPriority, nil, nil).Map( func(staker state.Staker) commands.Command { cmd := (*deleteCurrentValidatorCommand)(&staker) return cmd }, -).WithShrinker( - func(v interface{}) gopter.Shrink { - return gen.IntShrinker(v.(deleteCurrentValidatorCommand)).Map(func(staker state.Staker) *deleteCurrentValidatorCommand { - cmd := (*deleteCurrentValidatorCommand)(&staker) - return cmd - }) +) + +// addTopDiffCommand section +type addTopDiffCommand struct{} + +func (*addTopDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { + sys := sut.(*sysUnderTest) + sys.addDiffOnTop() + return sys +} + +func (*addTopDiffCommand) NextState(cmdState commands.State) commands.State { + return cmdState // model has no diffs +} + +func (*addTopDiffCommand) PreCondition(commands.State) bool { + return true +} + +func (*addTopDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + model := cmdState.(*stakersStorageModel) + sys := res.(*sysUnderTest) + + if checkSystemAndModelContent(model, sys) { + return &gopter.PropResult{Status: gopter.PropTrue} + } + + return &gopter.PropResult{Status: gopter.PropFalse} +} + +func (*addTopDiffCommand) String() string { + return "AddTopDiffCommand" +} + +var genAddTopDiffCommand = stakerGenerator(anyPriority, nil, nil).Map( + func(state.Staker) commands.Command { + return &addTopDiffCommand{} }, ) -func checkSystemAndModelContent(model *stakersStorageModel, sys sysUnderTest) bool { +func checkSystemAndModelContent(model *stakersStorageModel, sys *sysUnderTest) bool { // top view content must always match model content - topDiffID := sys.blkIDList[len(sys.blkIDList)-1] - topDiff := sys.blkIDToChainState[topDiffID] + topBlkID := sys.blkIDsByHeight[len(sys.blkIDsByHeight)-1] + topDiff, _ := 
sys.GetState(topBlkID) modelIt, err := model.GetCurrentStakerIterator() if err != nil { From 3bead39bcfe033f40452c152f3a0946c02415437 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 31 Mar 2023 16:05:39 +0200 Subject: [PATCH 014/132] wip: more stakers storage stateful testing --- .../models/stakers_storage_model_test.go | 115 ++++++++++++++---- 1 file changed, 94 insertions(+), 21 deletions(-) diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/models/stakers_storage_model_test.go index dfa0db8d6a63..f677be306249 100644 --- a/vms/platformvm/state/models/stakers_storage_model_test.go +++ b/vms/platformvm/state/models/stakers_storage_model_test.go @@ -21,47 +21,85 @@ var ( _ commands.Command = (*putCurrentValidatorCommand)(nil) _ commands.Command = (*deleteCurrentValidatorCommand)(nil) _ commands.Command = (*addTopDiffCommand)(nil) + _ commands.Command = (*applyBottomDiffCommand)(nil) ) func TestStateAndDiffComparisonToStorageModel(t *testing.T) { properties := gopter.NewProperties(nil) + + // to reproduce a given scenario do something like this: + // parameters := gopter.DefaultTestParametersWithSeed(1680269995295922009) + // properties := gopter.NewProperties(parameters) + properties.Property("state comparison to storage model", commands.Prop(stakersCommands)) properties.TestingRun(t) } type sysUnderTest struct { - baseState state.State - blkIDsByHeight []ids.ID - blkIDToChainState map[ids.ID]state.Chain + diffBlkIDSeed uint64 + baseState state.State + sortedDiffIDs []ids.ID + diffsMap map[ids.ID]state.Diff } func newSysUnderTest(baseState state.State) *sysUnderTest { - baseBlkID := baseState.GetLastAccepted() sys := &sysUnderTest{ - baseState: baseState, - blkIDToChainState: map[ids.ID]state.Chain{}, - blkIDsByHeight: []ids.ID{baseBlkID}, + baseState: baseState, + diffsMap: map[ids.ID]state.Diff{}, + sortedDiffIDs: []ids.ID{}, } return sys } func (s *sysUnderTest) GetState(blkID ids.ID) (state.Chain, bool) { - if state, found := s.blkIDToChainState[blkID]; found { + if state, found := s.diffsMap[blkID]; found { return state, found } return s.baseState, blkID == s.baseState.GetLastAccepted() } func (s *sysUnderTest) addDiffOnTop() { - seed := uint64(len(s.blkIDsByHeight)) - newTopBlkID := ids.Empty.Prefix(atomic.AddUint64(&seed, 1)) - topBlkID := s.blkIDsByHeight[len(s.blkIDsByHeight)-1] + newTopBlkID := ids.Empty.Prefix(atomic.AddUint64(&s.diffBlkIDSeed, 1)) + var topBlkID ids.ID + if len(s.sortedDiffIDs) == 0 { + topBlkID = s.baseState.GetLastAccepted() + } else { + topBlkID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] + } newTopDiff, err := state.NewDiff(topBlkID, s) if err != nil { panic(err) } - s.blkIDsByHeight = append(s.blkIDsByHeight, newTopBlkID) - s.blkIDToChainState[newTopBlkID] = newTopDiff + s.sortedDiffIDs = append(s.sortedDiffIDs, newTopBlkID) + s.diffsMap[newTopBlkID] = newTopDiff +} + +// getTopChainState returns top diff or baseState +func (s *sysUnderTest) getTopChainState() state.Chain { + var topChainStateID ids.ID + if len(s.sortedDiffIDs) != 0 { + topChainStateID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] + } else { + topChainStateID = s.baseState.GetLastAccepted() + } + + topChainState, _ := s.GetState(topChainStateID) + return topChainState +} + +// flushBottomDiff returns bottom diff if available +func (s *sysUnderTest) flushBottomDiff() { + if len(s.sortedDiffIDs) == 0 { + return + } + bottomDiffID := s.sortedDiffIDs[0] + diffToApply := s.diffsMap[bottomDiffID] + + diffToApply.Apply(s.baseState) + 
s.baseState.SetLastAccepted(bottomDiffID) + + s.sortedDiffIDs = s.sortedDiffIDs[1:] + delete(s.diffsMap, bottomDiffID) } // stakersCommands creates/destroy the system under test and generates @@ -118,8 +156,8 @@ var stakersCommands = &commands.ProtoCommands{ // genPutCurrentDelegatorCommand, // genDeleteCurrentDelegatorCommand, - // genApplyBottomDiffCommand, genAddTopDiffCommand, + genApplyBottomDiffCommand, // genCommitBottomStateCommand, ) }, @@ -131,9 +169,8 @@ type putCurrentValidatorCommand state.Staker func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { staker := (*state.Staker)(v) sys := sut.(*sysUnderTest) - topBlkID := sys.blkIDsByHeight[len(sys.blkIDsByHeight)-1] - topDiff, _ := sys.GetState(topBlkID) - topDiff.PutCurrentValidator(staker) + topChainState := sys.getTopChainState() + topChainState.PutCurrentValidator(staker) return sys } @@ -176,8 +213,7 @@ type deleteCurrentValidatorCommand state.Staker func (v *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { staker := (*state.Staker)(v) sys := sut.(*sysUnderTest) - topBlkID := sys.blkIDsByHeight[len(sys.blkIDsByHeight)-1] - topDiff, _ := sys.GetState(topBlkID) + topDiff := sys.getTopChainState() topDiff.DeleteCurrentValidator(staker) return sys // returns sys to allow comparison with state in PostCondition } @@ -253,10 +289,47 @@ var genAddTopDiffCommand = stakerGenerator(anyPriority, nil, nil).Map( }, ) +// applyBottomDiffCommand section +type applyBottomDiffCommand struct{} + +func (*applyBottomDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { + sys := sut.(*sysUnderTest) + sys.flushBottomDiff() + return sys +} + +func (*applyBottomDiffCommand) NextState(cmdState commands.State) commands.State { + return cmdState // model has no diffs +} + +func (*applyBottomDiffCommand) PreCondition(commands.State) bool { + return true +} + +func (*applyBottomDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + model := cmdState.(*stakersStorageModel) + sys := res.(*sysUnderTest) + + if checkSystemAndModelContent(model, sys) { + return &gopter.PropResult{Status: gopter.PropTrue} + } + + return &gopter.PropResult{Status: gopter.PropFalse} +} + +func (*applyBottomDiffCommand) String() string { + return "ApplyBottomDiffCommand" +} + +var genApplyBottomDiffCommand = stakerGenerator(anyPriority, nil, nil).Map( + func(state.Staker) commands.Command { + return &applyBottomDiffCommand{} + }, +) + func checkSystemAndModelContent(model *stakersStorageModel, sys *sysUnderTest) bool { // top view content must always match model content - topBlkID := sys.blkIDsByHeight[len(sys.blkIDsByHeight)-1] - topDiff, _ := sys.GetState(topBlkID) + topDiff := sys.getTopChainState() modelIt, err := model.GetCurrentStakerIterator() if err != nil { From f6356f265d94185d0d14f85f26f934d14657e97a Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 31 Mar 2023 17:57:09 +0200 Subject: [PATCH 015/132] wip: more stakers storage stateful testing --- .../state/models/stakers_storage_model.go | 4 ++ .../models/stakers_storage_model_test.go | 44 ++++++++++++++++++- 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/vms/platformvm/state/models/stakers_storage_model.go b/vms/platformvm/state/models/stakers_storage_model.go index b5e16ba17a75..c9984f9668f9 100644 --- a/vms/platformvm/state/models/stakers_storage_model.go +++ b/vms/platformvm/state/models/stakers_storage_model.go @@ -72,6 +72,10 @@ func putValidator(staker *state.Staker, 
domain map[subnetNodeKey]*state.Staker)
 		subnetID: staker.SubnetID,
 		nodeID:   staker.NodeID,
 	}
+
+	// overwrite the validator even if it already exists. In prod code,
+	// it's up to block verification to check that we do not overwrite
+	// a validator existing on state or lower diffs.
 	domain[key] = staker
 }
 
diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/models/stakers_storage_model_test.go
index f677be306249..0869a6750561 100644
--- a/vms/platformvm/state/models/stakers_storage_model_test.go
+++ b/vms/platformvm/state/models/stakers_storage_model_test.go
@@ -22,6 +22,7 @@ var (
 	_ commands.Command = (*deleteCurrentValidatorCommand)(nil)
 	_ commands.Command = (*addTopDiffCommand)(nil)
 	_ commands.Command = (*applyBottomDiffCommand)(nil)
+	_ commands.Command = (*commitBottomStateCommand)(nil)
 )
 
 func TestStateAndDiffComparisonToStorageModel(t *testing.T) {
@@ -158,7 +159,7 @@ var stakersCommands = &commands.ProtoCommands{
 
 			genAddTopDiffCommand,
 			genApplyBottomDiffCommand,
-			// genCommitBottomStateCommand,
+			genCommitBottomStateCommand,
 		)
 	},
 }
@@ -327,6 +328,47 @@ var genApplyBottomDiffCommand = stakerGenerator(anyPriority, nil, nil).Map(
 	},
 )
 
+// commitBottomStateCommand section
+type commitBottomStateCommand struct{}
+
+func (*commitBottomStateCommand) Run(sut commands.SystemUnderTest) commands.Result {
+	sys := sut.(*sysUnderTest)
+	err := sys.baseState.Commit()
+	if err != nil {
+		panic(err)
+	}
+	return sys
+}
+
+func (*commitBottomStateCommand) NextState(cmdState commands.State) commands.State {
+	return cmdState // model has no diffs
+}
+
+func (*commitBottomStateCommand) PreCondition(commands.State) bool {
+	return true
+}
+
+func (*commitBottomStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult {
+	model := cmdState.(*stakersStorageModel)
+	sys := res.(*sysUnderTest)
+
+	if checkSystemAndModelContent(model, sys) {
+		return &gopter.PropResult{Status: gopter.PropTrue}
+	}
+
+	return &gopter.PropResult{Status: gopter.PropFalse}
+}
+
+func (*commitBottomStateCommand) String() string {
+	return "CommitBottomStateCommand"
+}
+
+var genCommitBottomStateCommand = stakerGenerator(anyPriority, nil, nil).Map(
+	func(state.Staker) commands.Command {
+		return &commitBottomStateCommand{}
+	},
+)
+
 func checkSystemAndModelContent(model *stakersStorageModel, sys *sysUnderTest) bool {
 	// top view content must always match model content
 	topDiff := sys.getTopChainState()

From 13c52567b079b3d41477b62f2581fd8bc7c8dfe6 Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Fri, 7 Apr 2023 16:17:57 +0200
Subject: [PATCH 016/132] merge fix

---
 .../state/models/stakers_storage_model.go     |  17 +++
 .../models/stakers_storage_model_test.go      | 141 ++++++++++++------
 2 files changed, 114 insertions(+), 44 deletions(-)

diff --git a/vms/platformvm/state/models/stakers_storage_model.go b/vms/platformvm/state/models/stakers_storage_model.go
index c9984f9668f9..15b333267f73 100644
--- a/vms/platformvm/state/models/stakers_storage_model.go
+++ b/vms/platformvm/state/models/stakers_storage_model.go
@@ -4,6 +4,8 @@
 package models
 
 import (
+	"errors"
+
 	"golang.org/x/exp/maps"
 
 	"github.com/ava-labs/avalanchego/database"
@@ -192,6 +194,21 @@ func getCurrentStakerIterator(
 	}
 }
 
+func (*stakersStorageModel) SetDelegateeReward(
+	ids.ID,
+	ids.NodeID,
+	uint64,
+) error {
+	return errors.New("method not implemented in model")
+}
+
+func (*stakersStorageModel) GetDelegateeReward(
+	ids.ID,
+	ids.NodeID,
+) (uint64, error) {
+	return 0, errors.New("method not implemented in model")
+}
+
 type stakersStorageIteratorModel struct {
 	current *state.Staker
 
diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/models/stakers_storage_model_test.go
index 0869a6750561..3573e0ac582d 100644
--- a/vms/platformvm/state/models/stakers_storage_model_test.go
+++ b/vms/platformvm/state/models/stakers_storage_model_test.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/ava-labs/avalanchego/ids"
 	"github.com/ava-labs/avalanchego/vms/platformvm/state"
+	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
 	"github.com/leanovate/gopter"
 	"github.com/leanovate/gopter/commands"
 	"github.com/leanovate/gopter/gen"
@@ -88,19 +89,23 @@ func (s *sysUnderTest) getTopChainState() state.Chain {
 	return topChainState
 }
 
-// flushBottomDiff returns bottom diff if available
-func (s *sysUnderTest) flushBottomDiff() {
+// flushBottomDiff applies bottom diff if available
+func (s *sysUnderTest) flushBottomDiff() bool {
 	if len(s.sortedDiffIDs) == 0 {
-		return
+		return false
 	}
 	bottomDiffID := s.sortedDiffIDs[0]
 	diffToApply := s.diffsMap[bottomDiffID]
 
-	diffToApply.Apply(s.baseState)
+	err := diffToApply.Apply(s.baseState)
+	if err != nil {
+		panic(err)
+	}
 	s.baseState.SetLastAccepted(bottomDiffID)
 
 	s.sortedDiffIDs = s.sortedDiffIDs[1:]
 	delete(s.diffsMap, bottomDiffID)
+	return true
 }
 
 // stakersCommands creates/destroy the system under test and generates
@@ -141,11 +146,13 @@ var stakersCommands = &commands.ProtoCommands{
 			panic(err)
 		}
 	},
-	// TODO ABENEGIA: using gen.Const(newStakersStorageModel()) would not recreated model
-	// among calls. Hence just use a dummy generated with sole purpose of recreating model
-	InitialStateGen: gen.IntRange(1, 2).Map(func(int) *stakersStorageModel {
-		return newStakersStorageModel()
-	}),
+	// Note: using gen.Const(newStakersStorageModel()) would not recreate the model
+	// across calls. Hence just use a dummy generator whose sole purpose is recreating it.
+	InitialStateGen: gen.IntRange(1, 2).Map(
+		func(int) *stakersStorageModel {
+			return newStakersStorageModel()
+		},
+	),
 
 	InitialPreConditionFunc: func(state commands.State) bool {
 		return true // nothing to do for now
@@ -198,10 +205,11 @@ func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res co
 }
 
 func (v *putCurrentValidatorCommand) String() string {
-	return fmt.Sprintf("PutCurrentValidator(subnetID: %s, nodeID: %s, txID: %s)", v.SubnetID, v.NodeID, v.TxID)
+	return fmt.Sprintf("PutCurrentValidator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)",
+		v.SubnetID, v.NodeID, v.TxID, v.Priority, v.StartTime.Unix(), v.EndTime.Sub(v.StartTime))
 }
 
-var genPutCurrentValidatorCommand = stakerGenerator(anyPriority, nil, nil).Map(
+var genPutCurrentValidatorCommand = stakerGenerator(currentValidator, nil, nil).Map(
 	func(staker state.Staker) commands.Command {
 		cmd := (*putCurrentValidatorCommand)(&staker)
 		return cmd
@@ -209,24 +217,67 @@ var genPutCurrentValidatorCommand = stakerGenerator(anyPriority, nil, nil).Map(
 )
 
 // DeleteCurrentValidator section
-type deleteCurrentValidatorCommand state.Staker
+type deleteCurrentValidatorCommand struct{}
 
-func (v *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result {
-	staker := (*state.Staker)(v)
+func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result {
+	// delete first validator, if any
 	sys := sut.(*sysUnderTest)
 	topDiff := sys.getTopChainState()
-	topDiff.DeleteCurrentValidator(staker)
+
+	stakerIt, err := topDiff.GetCurrentStakerIterator()
+	if err != nil {
+		panic(err)
+	}
+	var (
+		found     = false
+		validator *state.Staker
+	)
+	for !found && stakerIt.Next() {
+		validator = stakerIt.Value()
+		if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority ||
+			validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority ||
+			validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority {
+			found = true
+		}
+	}
+	stakerIt.Release() // release the iterator on every exit path
+	if !found {
+		return sys // no current validator to delete
+	}
+
+	topDiff.DeleteCurrentValidator(validator)
 	return sys // returns sys to allow comparison with state in PostCondition
 }
 
-func (v *deleteCurrentValidatorCommand) NextState(cmdState commands.State) commands.State {
-	staker := (*state.Staker)(v)
-	cmdState.(*stakersStorageModel).DeleteCurrentValidator(staker)
+func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) commands.State {
+	model := cmdState.(*stakersStorageModel)
+	stakerIt, err := model.GetCurrentStakerIterator()
+	if err != nil {
+		panic(err) // a commands.State must not carry an error
+	}
+
+	var (
+		found     = false
+		validator *state.Staker
+	)
+	for !found && stakerIt.Next() {
+		validator = stakerIt.Value()
+		if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority ||
+			validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority ||
+			validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority {
+			found = true
+		}
+	}
+	stakerIt.Release() // release the iterator on every exit path
+	if !found {
+		return cmdState // no current validator to delete
+	}
+
+	model.DeleteCurrentValidator(validator)
 	return cmdState
 }
 
 func (*deleteCurrentValidatorCommand) PreCondition(commands.State) bool {
-	// Don't even require staker to be inserted before being deleted
 	return true
 }
 
@@ -241,14 +292,13 @@ func (*deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, res
 	return &gopter.PropResult{Status: gopter.PropFalse}
 }
 
-func (v *deleteCurrentValidatorCommand) String() string {
-	return fmt.Sprintf("DeleteCurrentValidator(subnetID: %s, nodeID: %s, txID: %s)", v.SubnetID, v.NodeID, v.TxID)
+func (*deleteCurrentValidatorCommand) String() string {
+	return "DeleteCurrentValidator"
 }
 
-var genDeleteCurrentValidatorCommand = stakerGenerator(anyPriority, nil, nil).Map(
-	func(staker state.Staker) commands.Command {
-		cmd := (*deleteCurrentValidatorCommand)(&staker)
-		return cmd
+var genDeleteCurrentValidatorCommand = gen.IntRange(1, 2).Map(
+	func(int) commands.Command {
+		return &deleteCurrentValidatorCommand{}
 	},
 )
 
@@ -284,8 +334,8 @@ func (*addTopDiffCommand) String() string {
 	return "AddTopDiffCommand"
 }
 
-var genAddTopDiffCommand = stakerGenerator(anyPriority, nil, nil).Map(
-	func(state.Staker) commands.Command {
+var genAddTopDiffCommand = gen.IntRange(1, 2).Map(
+	func(int) commands.Command {
 		return &addTopDiffCommand{}
 	},
 )
@@ -295,7 +345,7 @@ type applyBottomDiffCommand struct{}
 
 func (*applyBottomDiffCommand) Run(sut commands.SystemUnderTest) commands.Result {
 	sys := sut.(*sysUnderTest)
-	sys.flushBottomDiff()
+	_ = sys.flushBottomDiff()
 	return sys
 }
 
@@ -322,8 +372,8 @@ func (*applyBottomDiffCommand) String() string {
 	return "ApplyBottomDiffCommand"
 }
 
-var genApplyBottomDiffCommand = stakerGenerator(anyPriority, nil, nil).Map(
-	func(state.Staker) commands.Command {
+var genApplyBottomDiffCommand = gen.IntRange(1, 2).Map(
+	func(int) commands.Command {
 		return &applyBottomDiffCommand{}
 	},
 )
@@ -363,8 +413,8 @@ func (*commitBottomStateCommand) String() string {
 	return "CommitBottomStateCommand"
 }
 
-var genCommitBottomStateCommand = 
gen.IntRange(1, 2).Map( + func(int) commands.Command { return &commitBottomStateCommand{} }, ) @@ -382,25 +432,28 @@ func checkSystemAndModelContent(model *stakersStorageModel, sys *sysUnderTest) b return false } - for { - modelNext := modelIt.Next() - sysNext := sysIt.Next() - if modelNext != sysNext { - return false - } - if !sysNext { - break // done with both model and sys iterations - } + modelStakers := make([]*state.Staker, 0) + for modelIt.Next() { + modelStakers = append(modelStakers, modelIt.Value()) + } + modelIt.Release() - modelStaker := modelIt.Value() - sysStaker := sysIt.Value() + sysStakers := make([]*state.Staker, 0) + for sysIt.Next() { + sysStakers = append(sysStakers, sysIt.Value()) + } + sysIt.Release() + if len(modelStakers) != len(sysStakers) { + return false + } + + for idx, modelStaker := range modelStakers { + sysStaker := sysStakers[idx] if modelStaker == nil || sysStaker == nil || !reflect.DeepEqual(modelStaker, sysStaker) { return false } } - modelIt.Release() - sysIt.Release() return true } From 610b14d68baf1573272557229f3e02bb561e9ad0 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 11 Apr 2023 12:31:41 -0400 Subject: [PATCH 017/132] Add BLS benchmarks (#1318) Co-authored-by: Aaron Buchwald --- utils/crypto/bls/bls_benchmark_test.go | 88 +++++++++++++++++++ .../secp256k1/secp256k1_benchmark_test.go | 53 +++-------- 2 files changed, 101 insertions(+), 40 deletions(-) create mode 100644 utils/crypto/bls/bls_benchmark_test.go diff --git a/utils/crypto/bls/bls_benchmark_test.go b/utils/crypto/bls/bls_benchmark_test.go new file mode 100644 index 000000000000..a4503260a821 --- /dev/null +++ b/utils/crypto/bls/bls_benchmark_test.go @@ -0,0 +1,88 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bls + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils" +) + +var sizes = []int{ + 2, + 4, + 8, + 16, + 32, + 64, + 128, + 256, + 512, + 1024, + 2048, + 4096, +} + +func BenchmarkSign(b *testing.B) { + require := require.New(b) + + privateKey, err := NewSecretKey() + require.NoError(err) + for _, messageSize := range sizes { + b.Run(fmt.Sprintf("%d", messageSize), func(b *testing.B) { + message := utils.RandomBytes(messageSize) + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _ = Sign(privateKey, message) + } + }) + } +} + +func BenchmarkVerify(b *testing.B) { + require := require.New(b) + + privateKey, err := NewSecretKey() + require.NoError(err) + publicKey := PublicFromSecretKey(privateKey) + + for _, messageSize := range sizes { + b.Run(fmt.Sprintf("%d", messageSize), func(b *testing.B) { + message := utils.RandomBytes(messageSize) + signature := Sign(privateKey, message) + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + require.True(Verify(publicKey, signature, message)) + } + }) + } +} + +func BenchmarkAggregatePublicKeys(b *testing.B) { + keys := make([]*PublicKey, 4096) + for i := range keys { + privateKey, err := NewSecretKey() + require.NoError(b, err) + + keys[i] = PublicFromSecretKey(privateKey) + } + + for _, size := range sizes { + b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { + require := require.New(b) + + for n := 0; n < b.N; n++ { + _, err := AggregatePublicKeys(keys[:size]) + require.NoError(err) + } + }) + } +} diff --git a/utils/crypto/secp256k1/secp256k1_benchmark_test.go b/utils/crypto/secp256k1/secp256k1_benchmark_test.go index 0d7a9e5aec9a..b7f105b0dfe1 100644 --- a/utils/crypto/secp256k1/secp256k1_benchmark_test.go +++ b/utils/crypto/secp256k1/secp256k1_benchmark_test.go @@ -8,55 +8,28 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" ) -// NumVerifies is the number of verifications to run per operation -const NumVerifies = 1 +func BenchmarkVerify(b *testing.B) { + require := require.New(b) -var ( - hashes [][]byte + f := &Factory{} - keys []*PublicKey - sigs [][]byte -) + privateKey, err := f.NewPrivateKey() + require.NoError(err) -func init() { - // Setup hashes: - bytes := ids.ID{} - for i := uint64(0); i < NumVerifies; i++ { - bytes[i%32]++ - hash := hashing.ComputeHash256(bytes[:]) - hashes = append(hashes, hash) - } + message := utils.RandomBytes(512) + hash := hashing.ComputeHash256(message) - // Setup signatures: - f := &Factory{} - for i := uint64(0); i < NumVerifies; i++ { - privateKey, err := f.NewPrivateKey() - if err != nil { - panic(err) - } - - publicKey := privateKey.PublicKey() - sig, err := privateKey.SignHash(hashes[i]) - if err != nil { - panic(err) - } - - keys = append(keys, publicKey) - sigs = append(sigs, sig) - } -} + publicKey := privateKey.PublicKey() + signature, err := privateKey.SignHash(hash) + require.NoError(err) -// BenchmarkSECP256k1Verify runs the benchmark with SECP256K1 keys -func BenchmarkSECP256k1Verify(b *testing.B) { - require := require.New(b) + b.ResetTimer() for n := 0; n < b.N; n++ { - for i := 0; i < NumVerifies; i++ { - require.True(keys[i].VerifyHash(hashes[i], sigs[i])) - } + require.True(publicKey.VerifyHash(hash, signature)) } } From 0c6064a85a5fb033bd26474f0380781a6070fb0b Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 11 Apr 2023 13:39:42 -0400 Subject: [PATCH 018/132] Encode codec version in 
merkledb (#1313) Co-authored-by: Alberto Benegiamo --- x/merkledb/codec.go | 73 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 59 insertions(+), 14 deletions(-) diff --git a/x/merkledb/codec.go b/x/merkledb/codec.go index 51984d141a1d..7baa37159981 100644 --- a/x/merkledb/codec.go +++ b/x/merkledb/codec.go @@ -23,6 +23,7 @@ const ( minVarIntLen = 1 boolLen = 1 idLen = hashing.HashLen + minCodecVersionLen = minVarIntLen minSerializedPathLen = minVarIntLen minByteSliceLen = minVarIntLen minDeletedKeyLen = minByteSliceLen @@ -30,11 +31,11 @@ const ( minProofPathLen = minVarIntLen minKeyValueLen = 2 * minByteSliceLen minProofNodeLen = minSerializedPathLen + minMaybeByteSliceLen + minVarIntLen - minProofLen = minProofPathLen + minByteSliceLen - minChangeProofLen = boolLen + 2*minProofPathLen + 2*minVarIntLen - minRangeProofLen = 2*minProofPathLen + minVarIntLen - minDBNodeLen = minMaybeByteSliceLen + minVarIntLen - minHashValuesLen = minVarIntLen + minMaybeByteSliceLen + minSerializedPathLen + minProofLen = minCodecVersionLen + minProofPathLen + minByteSliceLen + minChangeProofLen = minCodecVersionLen + +boolLen + 2*minProofPathLen + 2*minVarIntLen + minRangeProofLen = minCodecVersionLen + +2*minProofPathLen + minVarIntLen + minDBNodeLen = minCodecVersionLen + minMaybeByteSliceLen + minVarIntLen + minHashValuesLen = minCodecVersionLen + minVarIntLen + minMaybeByteSliceLen + minSerializedPathLen minProofNodeChildLen = minVarIntLen + idLen minChildLen = minVarIntLen + minSerializedPathLen + idLen ) @@ -60,6 +61,7 @@ var ( errNonZeroNibblePadding = errors.New("nibbles should be padded with 0s") errExtraSpace = errors.New("trailing buffer space") errNegativeSliceLength = errors.New("negative slice length") + errInvalidCodecVersion = errors.New("invalid codec version") ) // EncoderDecoder defines the interface needed by merkleDB to marshal @@ -69,7 +71,6 @@ type EncoderDecoder interface { Decoder } -// TODO actually encode the version and remove version from the interface type Encoder interface { EncodeProof(version uint16, p *Proof) ([]byte, error) EncodeChangeProof(version uint16, p *ChangeProof) ([]byte, error) @@ -107,10 +108,13 @@ func (c *codecImpl) EncodeProof(version uint16, proof *Proof) ([]byte, error) { } if version != codecVersion { - return nil, errUnknownVersion + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) } buf := &bytes.Buffer{} + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } if err := c.encodeProofPath(buf, proof.Path); err != nil { return nil, err } @@ -129,14 +133,17 @@ func (c *codecImpl) EncodeChangeProof(version uint16, proof *ChangeProof) ([]byt } if version != codecVersion { - return nil, errUnknownVersion + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) } buf := &bytes.Buffer{} + + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } if err := c.encodeBool(buf, proof.HadRootsInHistory); err != nil { return nil, err } - if err := c.encodeProofPath(buf, proof.StartProof); err != nil { return nil, err } @@ -169,10 +176,13 @@ func (c *codecImpl) EncodeRangeProof(version uint16, proof *RangeProof) ([]byte, } if version != codecVersion { - return nil, errUnknownVersion + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) } buf := &bytes.Buffer{} + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } if err := c.encodeProofPath(buf, proof.StartProof); err != nil { return nil, err } @@ -197,10 +207,13 @@ func (c *codecImpl) encodeDBNode(version 
uint16, n *dbNode) ([]byte, error) { } if version != codecVersion { - return nil, errUnknownVersion + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) } buf := &bytes.Buffer{} + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } if err := c.encodeMaybeByteSlice(buf, n.value); err != nil { return nil, err } @@ -231,10 +244,15 @@ func (c *codecImpl) encodeHashValues(version uint16, hv *hashValues) ([]byte, er } if version != codecVersion { - return nil, errUnknownVersion + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) } buf := &bytes.Buffer{} + + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } + length := len(hv.Children) if err := c.encodeInt(buf, length); err != nil { return nil, err @@ -273,7 +291,13 @@ func (c *codecImpl) DecodeProof(b []byte, proof *Proof) (uint16, error) { err error src = bytes.NewReader(b) ) - + gotCodecVersion, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if codecVersion != gotCodecVersion { + return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + } if proof.Path, err = c.decodeProofPath(src); err != nil { return 0, err } @@ -302,6 +326,13 @@ func (c *codecImpl) DecodeChangeProof(b []byte, proof *ChangeProof) (uint16, err err error ) + gotCodecVersion, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if gotCodecVersion != codecVersion { + return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + } if proof.HadRootsInHistory, err = c.decodeBool(src); err != nil { return 0, err } @@ -363,7 +394,13 @@ func (c *codecImpl) DecodeRangeProof(b []byte, proof *RangeProof) (uint16, error src = bytes.NewReader(b) err error ) - + gotCodecVersion, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if codecVersion != gotCodecVersion { + return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + } if proof.StartProof, err = c.decodeProofPath(src); err != nil { return 0, err } @@ -406,6 +443,14 @@ func (c *codecImpl) decodeDBNode(b []byte, n *dbNode) (uint16, error) { err error ) + gotCodecVersion, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if codecVersion != gotCodecVersion { + return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + } + if n.value, err = c.decodeMaybeByteSlice(src); err != nil { return 0, err } From ffc7e42f8e0c489b7a9958fc6c16e5436ed08383 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 11 Apr 2023 14:03:38 -0400 Subject: [PATCH 019/132] Expose consensus-app-concurrency (#1322) --- chains/manager.go | 3 +++ config/config.go | 6 ++++++ config/flags.go | 1 + config/keys.go | 1 + node/config.go | 3 +++ node/node.go | 1 + snow/networking/handler/handler.go | 2 +- snow/networking/handler/handler_test.go | 8 ++++++++ snow/networking/router/chain_router_test.go | 13 ++++++++++++- snow/networking/sender/sender_test.go | 5 +++++ utils/constants/networking.go | 1 + vms/platformvm/vm_test.go | 1 + 12 files changed, 43 insertions(+), 2 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index c821cbc6fb26..b3614139cb18 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -205,6 +205,7 @@ type ManagerConfig struct { Metrics metrics.MultiGatherer ConsensusGossipFrequency time.Duration + ConsensusAppConcurrency int // Max Time to spend fetching a container and its // ancestors when responding to a GetAncestors @@ -818,6 +819,7 @@ func (m *manager) createAvalancheChain( vdrs, msgChan, m.ConsensusGossipFrequency, + 
m.ConsensusAppConcurrency, m.ResourceTracker, validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector sb, @@ -1173,6 +1175,7 @@ func (m *manager) createSnowmanChain( vdrs, msgChan, m.ConsensusGossipFrequency, + m.ConsensusAppConcurrency, m.ResourceTracker, subnetConnector, sb, diff --git a/config/config.go b/config/config.go index 70a1c782fcec..e955c749b01b 100644 --- a/config/config.go +++ b/config/config.go @@ -1310,6 +1310,12 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { return node.Config{}, fmt.Errorf("%s must be >= 0", ConsensusGossipFrequencyKey) } + // App handling + nodeConfig.ConsensusAppConcurrency = int(v.GetUint(ConsensusAppConcurrencyKey)) + if nodeConfig.ConsensusAppConcurrency <= 0 { + return node.Config{}, fmt.Errorf("%s must be > 0", ConsensusAppConcurrencyKey) + } + nodeConfig.UseCurrentHeight = v.GetBool(ProposerVMUseCurrentHeightKey) // Logging diff --git a/config/flags.go b/config/flags.go index 98c782d0f161..c30f6d27e94e 100644 --- a/config/flags.go +++ b/config/flags.go @@ -178,6 +178,7 @@ func addNodeFlags(fs *pflag.FlagSet) { // Router fs.Duration(ConsensusGossipFrequencyKey, constants.DefaultConsensusGossipFrequency, "Frequency of gossiping accepted frontiers") + fs.Uint(ConsensusAppConcurrencyKey, constants.DefaultConsensusAppConcurrency, "Maximum number of goroutines to use when handling App messages on a chain") fs.Duration(ConsensusShutdownTimeoutKey, constants.DefaultConsensusShutdownTimeout, "Timeout before killing an unresponsive chain") fs.Uint(ConsensusGossipAcceptedFrontierValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierValidatorSize, "Number of validators to gossip to when gossiping accepted frontier") fs.Uint(ConsensusGossipAcceptedFrontierNonValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierNonValidatorSize, "Number of non-validators to gossip to when gossiping accepted frontier") diff --git a/config/keys.go b/config/keys.go index 1f9e0a00b21d..01c3364393c3 100644 --- a/config/keys.go +++ b/config/keys.go @@ -143,6 +143,7 @@ const ( IpcsPathKey = "ipcs-path" MeterVMsEnabledKey = "meter-vms-enabled" ConsensusGossipFrequencyKey = "consensus-gossip-frequency" + ConsensusAppConcurrencyKey = "consensus-app-concurrency" ConsensusGossipAcceptedFrontierValidatorSizeKey = "consensus-accepted-frontier-gossip-validator-size" ConsensusGossipAcceptedFrontierNonValidatorSizeKey = "consensus-accepted-frontier-gossip-non-validator-size" ConsensusGossipAcceptedFrontierPeerSizeKey = "consensus-accepted-frontier-gossip-peer-size" diff --git a/node/config.go b/node/config.go index 4fce225eb172..d32a0b071726 100644 --- a/node/config.go +++ b/node/config.go @@ -179,6 +179,9 @@ type Config struct { ConsensusShutdownTimeout time.Duration `json:"consensusShutdownTimeout"` // Gossip a container in the accepted frontier every [ConsensusGossipFrequency] ConsensusGossipFrequency time.Duration `json:"consensusGossipFreq"` + // ConsensusAppConcurrency defines the maximum number of goroutines to + // handle App messages per chain. 
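+	// It is wired through from the consensus-app-concurrency flag and is
+	// validated to be at least 1 in GetNodeConfig at startup.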
+ ConsensusAppConcurrency int `json:"consensusAppConcurrency"` TrackedSubnets set.Set[ids.ID] `json:"trackedSubnets"` diff --git a/node/node.go b/node/node.go index 7e46ff066f6e..0e258f4841a2 100644 --- a/node/node.go +++ b/node/node.go @@ -740,6 +740,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { SubnetConfigs: n.Config.SubnetConfigs, ChainConfigs: n.Config.ChainConfigs, ConsensusGossipFrequency: n.Config.ConsensusGossipFrequency, + ConsensusAppConcurrency: n.Config.ConsensusAppConcurrency, BootstrapMaxTimeGetAncestors: n.Config.BootstrapMaxTimeGetAncestors, BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent, BootstrapAncestorsMaxContainersReceived: n.Config.BootstrapAncestorsMaxContainersReceived, diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go index 2934af800cbe..81b8a9c5e282 100644 --- a/snow/networking/handler/handler.go +++ b/snow/networking/handler/handler.go @@ -32,7 +32,6 @@ import ( ) const ( - threadPoolSize = 2 numDispatchersToClose = 3 // If a consensus message takes longer than this to process, the handler // will log a warning. @@ -121,6 +120,7 @@ func New( validators validators.Set, msgFromVMChan <-chan common.Message, gossipFrequency time.Duration, + threadPoolSize int, resourceTracker tracker.ResourceTracker, subnetConnector validators.SubnetConnector, subnet subnets.Subnet, diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index 9b51748b92c4..b9e757b7d99d 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -28,6 +28,8 @@ import ( "github.com/ava-labs/avalanchego/utils/resource" ) +const testThreadPoolSize = 2 + var errFatal = errors.New("error should cause handler to close") func TestHandlerDropsTimedOutMessages(t *testing.T) { @@ -52,6 +54,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -146,6 +149,7 @@ func TestHandlerClosesOnError(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -236,6 +240,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { vdrs, nil, 1, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -315,6 +320,7 @@ func TestHandlerDispatchInternal(t *testing.T) { vdrs, msgFromVMChan, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -392,6 +398,7 @@ func TestHandlerSubnetConnector(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, connector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -561,6 +568,7 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ids.EmptyNodeID, subnets.Config{}), diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 27a83b6f3036..f235064b28f4 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -36,7 +36,10 @@ import ( "github.com/ava-labs/avalanchego/version" ) -const engineType = p2p.EngineType_ENGINE_TYPE_AVALANCHE +const ( + engineType = p2p.EngineType_ENGINE_TYPE_AVALANCHE 
+ testThreadPoolSize = 2 +) func TestShutdown(t *testing.T) { vdrs := validators.NewSet() @@ -89,6 +92,7 @@ func TestShutdown(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -224,6 +228,7 @@ func TestShutdownTimesOut(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -380,6 +385,7 @@ func TestRouterTimeout(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -848,6 +854,7 @@ func TestRouterClearTimeouts(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -1138,6 +1145,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, sb, @@ -1287,6 +1295,7 @@ func TestRouterCrossChainMessages(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(requester.NodeID, subnets.Config{}), @@ -1304,6 +1313,7 @@ func TestRouterCrossChainMessages(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(responder.NodeID, subnets.Config{}), @@ -1552,6 +1562,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, sb, diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index db20b12b3c44..04bfeea6d561 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -37,6 +37,8 @@ import ( "github.com/ava-labs/avalanchego/version" ) +const testThreadPoolSize = 2 + var defaultSubnetConfig = subnets.Config{ GossipConfig: subnets.GossipConfig{ AcceptedFrontierPeerSize: 2, @@ -122,6 +124,7 @@ func TestTimeout(t *testing.T) { vdrs, nil, time.Hour, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -395,6 +398,7 @@ func TestReliableMessages(t *testing.T) { vdrs, nil, 1, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), @@ -543,6 +547,7 @@ func TestReliableMessagesToMyself(t *testing.T) { vdrs, nil, time.Second, + testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), diff --git a/utils/constants/networking.go b/utils/constants/networking.go index b958e2c40889..7290718c267a 100644 --- a/utils/constants/networking.go +++ b/utils/constants/networking.go @@ -79,6 +79,7 @@ const ( // Router DefaultConsensusGossipFrequency = 10 * time.Second + DefaultConsensusAppConcurrency = 2 DefaultConsensusShutdownTimeout = 30 * time.Second DefaultConsensusGossipAcceptedFrontierValidatorSize = 0 DefaultConsensusGossipAcceptedFrontierNonValidatorSize = 0 diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index f7b679d9a63e..539fd3862f84 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1827,6 +1827,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { beacons, msgChan, time.Hour, + 2, cpuTracker, vm, subnets.New(ctx.NodeID, subnets.Config{}), 
From e8f6abe35425a551e16f054170c557870873269b Mon Sep 17 00:00:00 2001 From: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Date: Tue, 11 Apr 2023 14:14:57 -0400 Subject: [PATCH 020/132] Adjust Logic In Merkle DB History (#1310) Co-authored-by: Darioush Jalali --- x/merkledb/history.go | 52 ++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/x/merkledb/history.go b/x/merkledb/history.go index fbaeca3bd8e3..e58dedbd3e19 100644 --- a/x/merkledb/history.go +++ b/x/merkledb/history.go @@ -96,28 +96,34 @@ func (th *trieHistory) getValueChanges(startRoot, endRoot ids.ID, start, end []b return nil, ErrRootIDNotPresent } - // [lastStartRootChange] is the latest appearance of [startRoot] - // which came before [lastEndRootChange]. - var lastStartRootChange *changeSummaryAndIndex - th.history.DescendLessOrEqual( - lastEndRootChange, - func(item *changeSummaryAndIndex) bool { - if item == lastEndRootChange { - return true // Skip first iteration - } - if item.rootID == startRoot { - lastStartRootChange = item - return false - } - return true - }, - ) - - // There's no change resulting in [startRoot] before the latest change resulting in [endRoot]. - if lastStartRootChange == nil { + // [startRootChanges] is the last appearance of [startRoot] + startRootChanges, ok := th.lastChanges[startRoot] + if !ok { return nil, ErrStartRootNotFound } + // startRootChanges is after the lastEndRootChange, but that is just the latest appearance of start root + // there may be an earlier entry, so attempt to find an entry that comes before lastEndRootChange + if startRootChanges.index > lastEndRootChange.index { + th.history.DescendLessOrEqual( + lastEndRootChange, + func(item *changeSummaryAndIndex) bool { + if item == lastEndRootChange { + return true // Skip first iteration + } + if item.rootID == startRoot { + startRootChanges = item + return false + } + return true + }, + ) + // There's no change resulting in [startRoot] before the latest change resulting in [endRoot]. + if startRootChanges.index > lastEndRootChange.index { + return nil, ErrStartRootNotFound + } + } + // Keep changes sorted so the largest can be removed in order to stay within the maxLength limit. sortedKeys := btree.NewG( 2, @@ -135,13 +141,13 @@ func (th *trieHistory) getValueChanges(startRoot, endRoot ids.ID, start, end []b // Only the key-value pairs with the greatest [maxLength] keys will be kept. combinedChanges := newChangeSummary(maxLength) - // For each change after [lastStartRootChange] up to and including + // For each change after [startRootChanges] up to and including // [lastEndRootChange], record the change in [combinedChanges]. th.history.AscendGreaterOrEqual( - lastStartRootChange, + startRootChanges, func(item *changeSummaryAndIndex) bool { - if item == lastStartRootChange { - // Start from the first change after [lastStartRootChange]. + if item == startRootChanges { + // Start from the first change after [startRootChanges]. 
return true } if item.index > lastEndRootChange.index { From 343693ff6068361505bb54d7449e96f9d242e50e Mon Sep 17 00:00:00 2001 From: David Boehm <91908103+dboehm-avalabs@users.noreply.github.com> Date: Tue, 11 Apr 2023 15:33:54 -0400 Subject: [PATCH 021/132] Fix Concurrency Bug In CommitToParent (#1320) Co-authored-by: Dan Laine --- x/merkledb/trie_test.go | 107 ++++++++++++++++++++++++++++++++++++++++ x/merkledb/trieview.go | 19 +++---- 2 files changed, 115 insertions(+), 11 deletions(-) diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index 713ae7f1e59a..530a2db633fc 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -1181,6 +1181,113 @@ func TestTrieViewInvalidChildrenExcept(t *testing.T) { require.Empty(view1.childViews) } +func Test_Trie_CommitToParentView_Concurrent(t *testing.T) { + for i := 0; i < 5000; i++ { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + + baseView, err := dbTrie.NewView() + require.NoError(t, err) + + parentView, err := baseView.NewView() + require.NoError(t, err) + err = parentView.Insert(context.Background(), []byte{0}, []byte{0}) + require.NoError(t, err) + + childView1, err := parentView.NewView() + require.NoError(t, err) + err = childView1.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(t, err) + + childView2, err := childView1.NewView() + require.NoError(t, err) + err = childView2.Insert(context.Background(), []byte{2}, []byte{2}) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(3) + go func() { + defer wg.Done() + require.NoError(t, parentView.CommitToParent(context.Background())) + }() + go func() { + defer wg.Done() + require.NoError(t, childView1.CommitToParent(context.Background())) + }() + go func() { + defer wg.Done() + require.NoError(t, childView2.CommitToParent(context.Background())) + }() + + wg.Wait() + + val0, err := baseView.GetValue(context.Background(), []byte{0}) + require.NoError(t, err) + require.Equal(t, []byte{0}, val0) + + val1, err := baseView.GetValue(context.Background(), []byte{1}) + require.NoError(t, err) + require.Equal(t, []byte{1}, val1) + + val2, err := baseView.GetValue(context.Background(), []byte{2}) + require.NoError(t, err) + require.Equal(t, []byte{2}, val2) + } +} + +func Test_Trie_CommitToParentDB_Concurrent(t *testing.T) { + for i := 0; i < 5000; i++ { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + + parentView, err := dbTrie.NewView() + require.NoError(t, err) + err = parentView.Insert(context.Background(), []byte{0}, []byte{0}) + require.NoError(t, err) + + childView1, err := parentView.NewView() + require.NoError(t, err) + err = childView1.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(t, err) + + childView2, err := childView1.NewView() + require.NoError(t, err) + err = childView2.Insert(context.Background(), []byte{2}, []byte{2}) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(3) + go func() { + defer wg.Done() + require.NoError(t, parentView.CommitToParent(context.Background())) + }() + go func() { + defer wg.Done() + require.NoError(t, childView1.CommitToParent(context.Background())) + }() + go func() { + defer wg.Done() + require.NoError(t, childView2.CommitToParent(context.Background())) + }() + + wg.Wait() + + val0, err := dbTrie.GetValue(context.Background(), []byte{0}) + require.NoError(t, err) + require.Equal(t, []byte{0}, val0) + + val1, err := dbTrie.GetValue(context.Background(), []byte{1}) + require.NoError(t, err) + 
require.Equal(t, []byte{1}, val1) + + val2, err := dbTrie.GetValue(context.Background(), []byte{2}) + require.NoError(t, err) + require.Equal(t, []byte{2}, val2) + } +} + func Test_Trie_ConcurrentReadWrite(t *testing.T) { require := require.New(t) diff --git a/x/merkledb/trieview.go b/x/merkledb/trieview.go index 4f95711e18c3..a1fc676d2b9a 100644 --- a/x/merkledb/trieview.go +++ b/x/merkledb/trieview.go @@ -27,7 +27,7 @@ const defaultPreallocationSize = 100 var ( ErrCommitted = errors.New("view has been committed") - ErrInvalid = errors.New("the trie this view was based on has changed, rending this view invalid") + ErrInvalid = errors.New("the trie this view was based on has changed, rendering this view invalid") ErrOddLengthWithValue = errors.New( "the underlying db only supports whole number of byte keys, so cannot record changes with odd nibble length", ) @@ -222,10 +222,8 @@ func (t *trieView) calculateNodeIDs(ctx context.Context) error { // ensure that the view under this one is up-to-date before potentially pulling in nodes from it // getting the Merkle root forces any unupdated nodes to recalculate their ids - if t.parentTrie != nil { - if _, err := t.getParentTrie().GetMerkleRoot(ctx); err != nil { - return err - } + if _, err := t.getParentTrie().GetMerkleRoot(ctx); err != nil { + return err } if err := t.applyChangedValuesToTrie(ctx); err != nil { @@ -567,11 +565,10 @@ func (t *trieView) commitChanges(ctx context.Context, trieToCommit *trieView) er // CommitToParent commits the changes from this view to its parent Trie func (t *trieView) CommitToParent(ctx context.Context) error { - // if we are about to write to the db, then we to hold the commitLock - if t.getParentTrie() == t.db { - t.db.commitLock.Lock() - defer t.db.commitLock.Unlock() - } + // TODO: Only lock the commitlock when the parent is the DB + // TODO: fix concurrency bugs with CommitToParent + t.db.commitLock.Lock() + defer t.db.commitLock.Unlock() t.lock.Lock() defer t.lock.Unlock() @@ -597,7 +594,7 @@ func (t *trieView) commitToParent(ctx context.Context) error { return err } - // overwrite this view with changes from the incoming view + // write this view's changes into its parent if err := t.getParentTrie().commitChanges(ctx, t); err != nil { return err } From f8eb5c34db74a07fe367b376211fd3ec9900336b Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 11 Apr 2023 16:46:37 -0400 Subject: [PATCH 022/132] Cleanup goroutines on health.Stop (#1325) --- api/health/health.go | 5 +++++ api/health/worker.go | 8 +++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/api/health/health.go b/api/health/health.go index dc9e09235127..874944e77269 100644 --- a/api/health/health.go +++ b/api/health/health.go @@ -28,7 +28,12 @@ type Health interface { Registerer Reporter + // Start running periodic health checks at the specified frequency. + // Repeated calls to Start will be no-ops. Start(ctx context.Context, freq time.Duration) + + // Stop running periodic health checks. Stop should only be called after + // Start. Once Stop returns, no more health checks will be executed. 
Stop() } diff --git a/api/health/worker.go b/api/health/worker.go index cf359a8ee577..9db01b8296ff 100644 --- a/api/health/worker.go +++ b/api/health/worker.go @@ -31,6 +31,7 @@ type worker struct { startOnce sync.Once closeOnce sync.Once + wg sync.WaitGroup closer chan struct{} } @@ -126,9 +127,13 @@ func (w *worker) Results(tags ...string) (map[string]Result, bool) { func (w *worker) Start(ctx context.Context, freq time.Duration) { w.startOnce.Do(func() { detachedCtx := utils.Detach(ctx) + w.wg.Add(1) go func() { ticker := time.NewTicker(freq) - defer ticker.Stop() + defer func() { + ticker.Stop() + w.wg.Done() + }() w.runChecks(detachedCtx) for { @@ -146,6 +151,7 @@ func (w *worker) Start(ctx context.Context, freq time.Duration) { func (w *worker) Stop() { w.closeOnce.Do(func() { close(w.closer) + w.wg.Wait() }) } From 8441a3a1c8d08827d0cc068ee47f33cd76549a16 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 11 Apr 2023 19:09:28 -0400 Subject: [PATCH 023/132] Update versions for v1.10.0 (#1282) --- .github/workflows/test.upgrade.yml | 7 ++-- RELEASES.md | 60 ++++++++++++++++++++++++++++++ go.mod | 2 +- go.sum | 4 +- proto/README.md | 2 +- scripts/constants.sh | 2 +- version/compatibility.json | 3 ++ version/constants.go | 17 ++++----- 8 files changed, 80 insertions(+), 17 deletions(-) diff --git a/.github/workflows/test.upgrade.yml b/.github/workflows/test.upgrade.yml index f9a204a328c1..a6c4a812ac86 100644 --- a/.github/workflows/test.upgrade.yml +++ b/.github/workflows/test.upgrade.yml @@ -26,6 +26,7 @@ jobs: - name: Build the avalanchego binary shell: bash run: ./scripts/build.sh - - name: Run upgrade tests - shell: bash - run: scripts/tests.upgrade.sh 1.9.0 ./build/avalanchego + # TODO: re-activate this test after there is a compatible tag to use + # - name: Run upgrade tests + # shell: bash + # run: scripts/tests.upgrade.sh 1.9.0 ./build/avalanchego diff --git a/RELEASES.md b/RELEASES.md index 7eac8f58e289..1a3d929ea8d4 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,65 @@ # Release Notes +## [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0) + +[This upgrade](https://medium.com/avalancheavax/cortina-x-chain-linearization-a1d9305553f6) linearizes the X-chain, introduces delegation batching to the P-chain, and increases the maximum block size on the C-chain. + +The changes in the upgrade go into effect at 11 AM ET, April 25th 2023 on Mainnet. + +**All Mainnet nodes should upgrade before 11 AM ET, April 25th 2023.** + +The supported plugin version is `25`. 
+ +### What's Changed + +- Add CODEOWNERS for the x/ package by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1260 +- Feature Spec Template by @richardpringle in https://github.com/ava-labs/avalanchego/pull/1258 +- Standardize CI triggers by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1265 +- special case no sent/received message in network health check by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1263 +- Fix bug template by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1268 +- Replace `flags` usage with `pflags` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1270 +- Fixed grammatical errors in `README.md` by @krakxn in https://github.com/ava-labs/avalanchego/pull/1102 +- Add tests for race conditions in merkledb by @kyl27 in https://github.com/ava-labs/avalanchego/pull/1256 +- Add P-chain indexer API example by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1271 +- use `require` in `snow/choices` tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1279 +- use `require` in `utils/wrappers` tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1280 +- add support for tracking delegatee rewards to validator metadata by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1273 +- defer delegatee rewards until end of validator staking period by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1262 +- Initialize UptimeCalculator in TestPeer by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1283 +- Add Avalanche liveness health checks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1287 +- Skip AMI generation with Fuji tags by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1288 +- Use `maps.Equal` in `set.Equals` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1290 +- return accrued delegator rewards in `GetCurrentValidators` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1291 +- Add zstd compression by @danlaine in https://github.com/ava-labs/avalanchego/pull/1278 +- implement `txs.Visitor` in X chain wallet by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1299 +- Parallelize gzip compression by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1293 +- Add zip bomb tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1300 +- Gossip Avalanche frontier after the linearization by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1303 +- Add fine grained metrics+logging for handling, processing, and grab l… by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/1301 +- Persist stateless block in AVM state by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1305 +- Initialize FxID fields in GetBlock and GetBlockByHeight by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1306 +- Filterable Health Tags by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1304 +- increase health await timeout by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1317 +- Expose GetEngineManager from the chain Handler by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1316 +- Add BLS benchmarks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1318 +- Encode codec version in merkledb by @danlaine in https://github.com/ava-labs/avalanchego/pull/1313 +- Expose consensus-app-concurrency by @StephenButtolph in 
https://github.com/ava-labs/avalanchego/pull/1322 +- Adjust Logic In Merkle DB History by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1310 +- Fix Concurrency Bug In CommitToParent by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1320 +- Cleanup goroutines on health.Stop by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1325 + +### New Contributors + +- @richardpringle made their first contribution in https://github.com/ava-labs/avalanchego/pull/1258 +- @ceyonur made their first contribution in https://github.com/ava-labs/avalanchego/pull/1263 +- @krakxn made their first contribution in https://github.com/ava-labs/avalanchego/pull/1102 +- @kyl27 made their first contribution in https://github.com/ava-labs/avalanchego/pull/1256 +- @dhrubabasu made their first contribution in https://github.com/ava-labs/avalanchego/pull/1279 +- @joshua-kim made their first contribution in https://github.com/ava-labs/avalanchego/pull/1283 +- @dboehm-avalabs made their first contribution in https://github.com/ava-labs/avalanchego/pull/1310 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.9.16...v1.10.0 + ## [v1.9.16](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.16) This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. diff --git a/go.mod b/go.mod index a23357836583..788feb121cec 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 - github.com/ava-labs/coreth v0.11.9-rc.0 + github.com/ava-labs/coreth v0.12.0-rc.2 github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 diff --git a/go.sum b/go.sum index 8bcad7be1771..76e1e3057e9f 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 h1:TVi9JEdKNU/RevYZ9PyW4pULbEdS+KQDA9Ki2DUvuAs= github.com/ava-labs/avalanche-network-runner-sdk v0.3.0/go.mod h1:SgKJvtqvgo/Bl/c8fxEHCLaSxEbzimYfBopcfrajxQk= -github.com/ava-labs/coreth v0.11.9-rc.0 h1:7oK5DWtvDEMKEaduw/34r5kdwUssRsFszCl1FFUvvPc= -github.com/ava-labs/coreth v0.11.9-rc.0/go.mod h1:y41I9mWK04s8oObvQeYjkdoidtPhkPqV8prRPN6zrd4= +github.com/ava-labs/coreth v0.12.0-rc.2 h1:UNyGhuC2HxZ8eCLZiZON8xRiJkNHVZ75zknu/xqkKBA= +github.com/ava-labs/coreth v0.12.0-rc.2/go.mod h1:ZGhoIZTWbIaTmzEbprXu0hLtLdoE2PSTEFnCTYr0BRk= github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 h1:EdxD90j5sClfL5Ngpz2TlnbnkNYdFPDXa0jDOjam65c= github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7/go.mod h1:XhiXSrh90sHUbkERzaxEftCmUz53eCijshDLZ4fByVM= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= diff --git a/proto/README.md b/proto/README.md index 4baa2d531fa8..f42fbf11d17d 100644 --- a/proto/README.md +++ b/proto/README.md @@ -1,6 +1,6 @@ # Avalanche gRPC -Now Serving: **Protocol Version 24** +Now Serving: **Protocol Version 25** Protobuf files are hosted at [https://buf.build/ava-labs/avalanche](https://buf.build/ava-labs/avalanche) and can be used as dependencies in other projects. 
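The hunks that follow record the protocol bump on disk: version/constants.go raises RPCChainVMProtocol from 24 to 25, and version/compatibility.json gains a "25": ["v1.10.0"] entry mapping the new protocol to the releases that serve it. A rough sketch of how a tool could consume such a mapping is below; it is self-contained under stated assumptions, the helper name is hypothetical, and the embedded JSON is an abridged copy of the file, not avalanchego's actual API:

package main

import (
	"encoding/json"
	"fmt"
)

// An abridged copy of the compatibility table added below; the real file
// lists every release that speaks each RPC protocol version.
const compatibilityJSON = `{
	"25": ["v1.10.0"],
	"24": ["v1.9.10", "v1.9.11"]
}`

// compatibleVersions returns the node releases recorded for a given RPC
// protocol version, or an error if the protocol is unknown.
func compatibleVersions(rpcProtocol string) ([]string, error) {
	var table map[string][]string
	if err := json.Unmarshal([]byte(compatibilityJSON), &table); err != nil {
		return nil, err
	}
	versions, ok := table[rpcProtocol]
	if !ok {
		return nil, fmt.Errorf("unknown RPC protocol %q", rpcProtocol)
	}
	return versions, nil
}

func main() {
	versions, err := compatibleVersions("25")
	if err != nil {
		panic(err)
	}
	fmt.Println("protocol 25 is served by:", versions)
}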
diff --git a/scripts/constants.sh b/scripts/constants.sh
index 6cd4df8d428b..5df8872f7aca 100755
--- a/scripts/constants.sh
+++ b/scripts/constants.sh
@@ -9,7 +9,7 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Direct
 avalanchego_path="$AVALANCHE_PATH/build/avalanchego"
 plugin_dir=${PLUGIN_DIR:-$HOME/.avalanchego/plugins}
 evm_path=${EVM_PATH:-$plugin_dir/evm}
-coreth_version=${CORETH_VERSION:-'v0.11.9-rc.0'}
+coreth_version=${CORETH_VERSION:-'v0.12.0-rc.2'}
 
 # Set the PATHS
 GOPATH="$(go env GOPATH)"
diff --git a/version/compatibility.json b/version/compatibility.json
index b342400a0d39..e11fdd3524f0 100644
--- a/version/compatibility.json
+++ b/version/compatibility.json
@@ -1,4 +1,7 @@
 {
+    "25": [
+        "v1.10.0"
+    ],
     "24": [
         "v1.9.10",
         "v1.9.11",
diff --git a/version/constants.go b/version/constants.go
index 4e0ee3ba9b64..d491e9b00fdd 100644
--- a/version/constants.go
+++ b/version/constants.go
@@ -14,14 +14,14 @@ import (
 
 // RPCChainVMProtocol should be bumped anytime changes are made which require
 // the plugin vm to upgrade to latest avalanchego release to be compatible.
-const RPCChainVMProtocol uint = 24
+const RPCChainVMProtocol uint = 25
 
 // These are globals that describe network upgrades and node versions
 var (
 	Current = &Semantic{
 		Major: 1,
-		Minor: 9,
-		Patch: 16,
+		Minor: 10,
+		Patch: 0,
 	}
 	CurrentApp = &Application{
 		Major: Current.Major,
@@ -30,12 +30,12 @@ var (
 	}
 	MinimumCompatibleVersion = &Application{
 		Major: 1,
-		Minor: 9,
+		Minor: 10,
 		Patch: 0,
 	}
 	PrevMinimumCompatibleVersion = &Application{
 		Major: 1,
-		Minor: 8,
+		Minor: 9,
 		Patch: 0,
 	}
@@ -95,10 +95,9 @@ var (
 	}
 	BanffDefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)
 
-	// TODO: update this before release
 	CortinaTimes = map[uint32]time.Time{
-		constants.MainnetID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC),
-		constants.FujiID:    time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC),
+		constants.MainnetID: time.Date(2023, time.April, 25, 15, 0, 0, 0, time.UTC),
+		constants.FujiID:    time.Date(2023, time.April, 6, 15, 0, 0, 0, time.UTC),
 	}
 	CortinaDefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)
 )
@@ -177,7 +176,7 @@ func GetCompatibility(networkID uint32) Compatibility {
 	return NewCompatibility(
 		CurrentApp,
 		MinimumCompatibleVersion,
-		GetBanffTime(networkID),
+		GetCortinaTime(networkID),
 		PrevMinimumCompatibleVersion,
 	)
 }

From 69589145baff0c5048d3243e7ad163e995e0bc1a Mon Sep 17 00:00:00 2001
From: Sam Batschelet
Date: Wed, 12 Apr 2023 15:13:50 -0400
Subject: [PATCH 024/132] Add benchmark for gRPC GetValidatorSet (#1326)

---
 .../gvalidators/validator_state_test.go       | 50 +++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/snow/validators/gvalidators/validator_state_test.go b/snow/validators/gvalidators/validator_state_test.go
index d9a6d4093f5f..2effc82bf0f3 100644
--- a/snow/validators/gvalidators/validator_state_test.go
+++ b/snow/validators/gvalidators/validator_state_test.go
@@ -6,6 +6,7 @@ package gvalidators
 import (
 	"context"
 	"errors"
+	"fmt"
 	"testing"
 
 	"github.com/golang/mock/gomock"
@@ -181,3 +182,52 @@ func TestGetValidatorSet(t *testing.T) {
 	_, err = state.client.GetValidatorSet(context.Background(), height, subnetID)
 	require.Error(err)
 }
+
+// BenchmarkGetValidatorSet measures the time it takes to complete a gRPC client
+// request based on a mocked validator set.
+func BenchmarkGetValidatorSet(b *testing.B) { + for _, size := range []int{1, 16, 32, 1024, 2048} { + vs := setupValidatorSet(b, size) + b.Run(fmt.Sprintf("get_validator_set_%d_validators", size), func(b *testing.B) { + benchmarkGetValidatorSet(b, vs) + }) + } +} + +func benchmarkGetValidatorSet(b *testing.B, vs map[ids.NodeID]*validators.GetValidatorOutput) { + require := require.New(b) + ctrl := gomock.NewController(b) + state := setupState(b, ctrl) + defer func() { + ctrl.Finish() + state.closeFn() + }() + + height := uint64(1337) + subnetID := ids.GenerateTestID() + state.server.EXPECT().GetValidatorSet(gomock.Any(), height, subnetID).Return(vs, nil).AnyTimes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := state.client.GetValidatorSet(context.Background(), height, subnetID) + require.NoError(err) + } + b.StopTimer() +} + +func setupValidatorSet(b *testing.B, size int) map[ids.NodeID]*validators.GetValidatorOutput { + b.Helper() + + set := make(map[ids.NodeID]*validators.GetValidatorOutput, size) + sk, err := bls.NewSecretKey() + require.NoError(b, err) + pk := bls.PublicFromSecretKey(sk) + for i := 0; i < size; i++ { + id := ids.GenerateTestNodeID() + set[id] = &validators.GetValidatorOutput{ + NodeID: id, + PublicKey: pk, + Weight: uint64(i), + } + } + return set +} From a924b944d781b0be21b3ac2ff26090ed1f693283 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 12 Apr 2023 15:15:49 -0400 Subject: [PATCH 025/132] Add checks for database being closed in merkledb; other nits (#1333) --- x/merkledb/db.go | 106 +++++++++++++++++++++++++++++------------ x/merkledb/trieview.go | 4 +- 2 files changed, 77 insertions(+), 33 deletions(-) diff --git a/x/merkledb/db.go b/x/merkledb/db.go index 88a63d23cac1..219d90fad1ff 100644 --- a/x/merkledb/db.go +++ b/x/merkledb/db.go @@ -38,7 +38,7 @@ const ( ) var ( - _ Trie = &Database{} + _ TrieView = &Database{} _ database.Database = &Database{} Codec, Version = newCodec() @@ -230,6 +230,10 @@ func (db *Database) CommitChangeProof(ctx context.Context, proof *ChangeProof) e db.commitLock.Lock() defer db.commitLock.Unlock() + if db.closed { + return database.ErrClosed + } + view, err := db.prepareChangeProofView(proof) if err != nil { return err @@ -243,6 +247,10 @@ func (db *Database) CommitRangeProof(ctx context.Context, start []byte, proof *R db.commitLock.Lock() defer db.commitLock.Unlock() + if db.closed { + return database.ErrClosed + } + view, err := db.prepareRangeProofView(start, proof) if err != nil { return err @@ -312,13 +320,14 @@ func (db *Database) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []e )) defer span.End() + // Lock to ensure no commit happens during the reads. db.lock.RLock() defer db.lock.RUnlock() values := make([][]byte, len(keys)) errors := make([]error, len(keys)) for i, key := range keys { - values[i], errors[i] = db.getValueCopy(newPath(key), false) + values[i], errors[i] = db.getValueCopy(newPath(key), false /*lock*/) } return values, errors } @@ -329,7 +338,7 @@ func (db *Database) GetValue(ctx context.Context, key []byte) ([]byte, error) { _, span := db.tracer.Start(ctx, "MerkleDB.GetValue") defer span.End() - return db.getValueCopy(newPath(key), true) + return db.getValueCopy(newPath(key), true /*lock*/) } // getValueCopy returns a copy of the value for the given [key]. @@ -344,6 +353,8 @@ func (db *Database) getValueCopy(key path, lock bool) ([]byte, error) { // getValue returns the value for the given [key]. // Returns database.ErrNotFound if it doesn't exist. 
+// If [lock], [db.lock]'s read lock is acquired. +// Otherwise assumes [db.lock] is already held. func (db *Database) getValue(key path, lock bool) ([]byte, error) { if lock { db.lock.RLock() @@ -353,6 +364,7 @@ func (db *Database) getValue(key path, lock bool) ([]byte, error) { if db.closed { return nil, database.ErrClosed } + n, err := db.getNode(key) if err != nil { return nil, err @@ -371,6 +383,10 @@ func (db *Database) GetMerkleRoot(ctx context.Context) (ids.ID, error) { db.lock.RLock() defer db.lock.RUnlock() + if db.closed { + return ids.Empty, database.ErrClosed + } + return db.getMerkleRoot(), nil } @@ -391,6 +407,10 @@ func (db *Database) GetProof(ctx context.Context, key []byte) (*Proof, error) { // Returns a proof of the existence/non-existence of [key] in this trie. // Assumes [db.commitLock] is read locked. func (db *Database) getProof(ctx context.Context, key []byte) (*Proof, error) { + if db.closed { + return nil, database.ErrClosed + } + view, err := db.newUntrackedView(defaultPreallocationSize) if err != nil { return nil, err @@ -436,6 +456,9 @@ func (db *Database) getRangeProofAtRoot( end []byte, maxLength int, ) (*RangeProof, error) { + if db.closed { + return nil, database.ErrClosed + } if maxLength <= 0 { return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) } @@ -468,6 +491,10 @@ func (db *Database) GetChangeProof( db.commitLock.RLock() defer db.commitLock.RUnlock() + if db.closed { + return nil, database.ErrClosed + } + result := &ChangeProof{ HadRootsInHistory: true, } @@ -560,7 +587,7 @@ func (db *Database) NewView() (TrieView, error) { // Returns a new view that isn't tracked in [db.childViews]. // For internal use only, namely in methods that create short-lived views. -// Assumes [db.lock] is read locked. +// Assumes [db.lock] and/or [db.commitLock] is read locked. 
func (db *Database) newUntrackedView(estimatedSize int) (*trieView, error) { return newTrieView(db, db, db.root.clone(), estimatedSize) } @@ -573,6 +600,10 @@ func (db *Database) NewPreallocatedView(estimatedSize int) (TrieView, error) { db.lock.Lock() defer db.lock.Unlock() + if db.closed { + return nil, database.ErrClosed + } + newView, err := newTrieView(db, db, db.root.clone(), estimatedSize) if err != nil { return nil, err @@ -589,7 +620,7 @@ func (db *Database) Has(k []byte) (bool, error) { return false, database.ErrClosed } - _, err := db.getValue(newPath(k), true) + _, err := db.getValue(newPath(k), false /*lock*/) if err == database.ErrNotFound { return false, nil } @@ -597,6 +628,12 @@ func (db *Database) Has(k []byte) (bool, error) { } func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.closed { + return nil, database.ErrClosed + } return db.nodeDB.HealthCheck(ctx) } @@ -604,10 +641,11 @@ func (db *Database) Insert(ctx context.Context, k, v []byte) error { db.commitLock.Lock() defer db.commitLock.Unlock() - db.lock.RLock() - view, err := db.newUntrackedView(defaultPreallocationSize) - db.lock.RUnlock() + if db.closed { + return database.ErrClosed + } + view, err := db.newUntrackedView(defaultPreallocationSize) if err != nil { return err } @@ -693,9 +731,11 @@ func (db *Database) Remove(ctx context.Context, key []byte) error { db.commitLock.Lock() defer db.commitLock.Unlock() - db.lock.RLock() + if db.closed { + return database.ErrClosed + } + view, err := db.newUntrackedView(defaultPreallocationSize) - db.lock.RUnlock() if err != nil { return err } @@ -710,6 +750,10 @@ func (db *Database) commitBatch(ops []database.BatchOp) error { db.commitLock.Lock() defer db.commitLock.Unlock() + if db.closed { + return database.ErrClosed + } + view, err := db.prepareBatchView(ops) if err != nil { return err @@ -732,12 +776,15 @@ func (db *Database) commitChanges(ctx context.Context, trieToCommit *trieView) e db.lock.Lock() defer db.lock.Unlock() - if trieToCommit == nil { + switch { + case db.closed: + return database.ErrClosed + case trieToCommit == nil: return nil - } - if trieToCommit.isInvalid() { + case trieToCommit.isInvalid(): return ErrInvalid } + changes := trieToCommit.changes _, span := db.tracer.Start(ctx, "MerkleDB.commitChanges", oteltrace.WithAttributes( attribute.Int("nodesChanged", len(changes.nodes)), @@ -745,10 +792,6 @@ func (db *Database) commitChanges(ctx context.Context, trieToCommit *trieView) e )) defer span.End() - if db.closed { - return database.ErrClosed - } - // invalidate all child views except for the view being committed db.invalidateChildrenExcept(trieToCommit) @@ -839,13 +882,13 @@ func (db *Database) moveChildViewsToDB(trieToCommit *trieView) { trieToCommit.childViews = make([]*trieView, 0, defaultPreallocationSize) } -// CommitToDB is a No Op for db since it is already in sync with itself -// here to satisfy TrieView interface +// CommitToDB is a no-op for db since it is already in sync with itself. +// This exists to satisfy the TrieView interface. func (*Database) CommitToDB(context.Context) error { return nil } -// invalidate and remove any child views that aren't the exception +// Invalidates and removes any child views that aren't [exception]. // Assumes [db.lock] is held. 
func (db *Database) invalidateChildrenExcept(exception *trieView) { isTrackedView := false @@ -965,6 +1008,9 @@ func (db *Database) getEditableNode(key path) (*node, error) { // Returns database.ErrNotFound if the node doesn't exist. // Assumes [db.lock] is read locked. func (db *Database) getNode(key path) (*node, error) { + if db.closed { + return nil, database.ErrClosed + } if key == RootPath { return db.root, nil } @@ -1012,6 +1058,11 @@ func (db *Database) getKeyValues( db.lock.RLock() defer db.lock.RUnlock() } + + if db.closed { + return nil, database.ErrClosed + } + if maxLength <= 0 { return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) } @@ -1044,12 +1095,9 @@ func (db *Database) getKeyValues( } // Returns a new view atop [db] with the changes in [ops] applied to it. -func (db *Database) prepareBatchView( - ops []database.BatchOp, -) (*trieView, error) { - db.lock.RLock() +// Assumes [db.commitLock] is read locked. +func (db *Database) prepareBatchView(ops []database.BatchOp) (*trieView, error) { view, err := db.newUntrackedView(len(ops)) - db.lock.RUnlock() if err != nil { return nil, err } @@ -1071,10 +1119,9 @@ func (db *Database) prepareBatchView( // Returns a new view atop [db] with the key/value pairs in [proof.KeyValues] // inserted and the key/value pairs in [proof.DeletedKeys] removed. +// Assumes [db.commitLock] is locked. func (db *Database) prepareChangeProofView(proof *ChangeProof) (*trieView, error) { - db.lock.RLock() view, err := db.newUntrackedView(len(proof.KeyValues)) - db.lock.RUnlock() if err != nil { return nil, err } @@ -1096,13 +1143,10 @@ func (db *Database) prepareChangeProofView(proof *ChangeProof) (*trieView, error // Returns a new view atop [db] with the key/value pairs in [proof.KeyValues] added and // any existing key-value pairs in the proof's range but not in the proof removed. -// assumes [db.commitLock] is held +// Assumes [db.commitLock] is locked. func (db *Database) prepareRangeProofView(start []byte, proof *RangeProof) (*trieView, error) { // Don't need to lock [view] because nobody else has a reference to it. 
- db.lock.RLock() view, err := db.newUntrackedView(len(proof.KeyValues)) - db.lock.RUnlock() - if err != nil { return nil, err } diff --git a/x/merkledb/trieview.go b/x/merkledb/trieview.go index a1fc676d2b9a..b94319ab0561 100644 --- a/x/merkledb/trieview.go +++ b/x/merkledb/trieview.go @@ -902,7 +902,7 @@ func (t *trieView) getValue(key path, lock bool) ([]byte, error) { t.db.metrics.ViewValueCacheMiss() // if we don't have local copy of the key, then grab a copy from the parent trie - value, err := t.getParentTrie().getValue(key, true) + value, err := t.getParentTrie().getValue(key, true /*lock*/) if err != nil { return nil, err } @@ -1304,7 +1304,7 @@ func (t *trieView) recordValueChange(key path, value Maybe[[]byte]) error { // grab the before value var beforeMaybe Maybe[[]byte] - before, err := t.getParentTrie().getValue(key, true) + before, err := t.getParentTrie().getValue(key, true /*lock*/) switch err { case nil: beforeMaybe = Some(before) From 7d6fda1b1934182331b51a6718731fad30298c8e Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 13 Apr 2023 16:22:36 +0200 Subject: [PATCH 026/132] nit --- vms/platformvm/state/models/stakers_ops_test.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go index 5b7e5fc68e1f..46b85fd2f3fa 100644 --- a/vms/platformvm/state/models/stakers_ops_test.go +++ b/vms/platformvm/state/models/stakers_ops_test.go @@ -326,9 +326,6 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g return "" }, stakerGenerator(currentValidator, &subnetID, &nodeID), - - // TODO ABENEGIA: make sure txIDs are unique in slice. - // They are unlikely to be equal, but still should be fixed. gen.SliceOfN(10, stakerGenerator(currentDelegator, &subnetID, &nodeID)). 
SuchThat(func(v interface{}) bool { stakersList := v.([]state.Staker) From adc5f408a67d51982e928a9dec28379f7ea4aec2 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 10 May 2023 22:50:56 +0200 Subject: [PATCH 027/132] docs --- vms/platformvm/state/models/helpers_test.go | 22 +++++++++---------- .../state/models/stakers_generator_test.go | 4 ++++ .../state/models/stakers_ops_test.go | 7 +++--- .../state/models/stakers_storage_model.go | 10 +++++++++ .../models/stakers_storage_model_test.go | 9 ++++++++ 5 files changed, 38 insertions(+), 14 deletions(-) diff --git a/vms/platformvm/state/models/helpers_test.go b/vms/platformvm/state/models/helpers_test.go index 933f700c3dfb..9b890e9ee5b5 100644 --- a/vms/platformvm/state/models/helpers_test.go +++ b/vms/platformvm/state/models/helpers_test.go @@ -4,6 +4,7 @@ package models import ( + "errors" "fmt" "time" @@ -31,9 +32,10 @@ import ( var ( _ state.Versions = (*versionsHolder)(nil) - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) - avaxAssetID = ids.ID{'y', 'e', 'e', 't'} + testNetworkID = uint32(10) // To be used in tests + xChainID = ids.Empty.Prefix(0) + cChainID = ids.Empty.Prefix(1) + avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultMinStakingDuration = 24 * time.Hour defaultMaxStakingDuration = 365 * 24 * time.Hour @@ -42,7 +44,7 @@ var ( defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) defaultTxFee = uint64(100) - testNetworkID = 10 // To be used in tests + errNonEmptyIteratorExpected = errors.New("expected non-empty iterator, got no elements") ) type versionsHolder struct { @@ -60,7 +62,7 @@ func buildChainState() (state.State, error) { cfg := defaultConfig() ctx := snow.DefaultContextTest() - ctx.NetworkID = 10 + ctx.NetworkID = testNetworkID ctx.XChainID = xChainID ctx.CChainID = cChainID ctx.AVAXAssetID = avaxAssetID @@ -107,19 +109,17 @@ func defaultConfig() *config.Config { }, ApricotPhase3Time: defaultValidateEndTime, ApricotPhase5Time: defaultValidateEndTime, - BanffTime: time.Time{}, // neglecting fork ordering this for package tests + BanffTime: defaultValidateEndTime, + CortinaTime: defaultValidateEndTime, } } func buildGenesisTest(ctx *snow.Context) ([]byte, error) { - // no UTXOs, not nor validators in this genesis - genesisUTXOs := make([]api.UTXO, 0) - genesisValidators := make([]api.PermissionlessValidator, 0) buildGenesisArgs := api.BuildGenesisArgs{ NetworkID: json.Uint32(testNetworkID), AvaxAssetID: ctx.AVAXAssetID, - UTXOs: genesisUTXOs, - Validators: genesisValidators, + UTXOs: nil, // no UTXOs in this genesis. Not relevant to package tests. + Validators: nil, // no validators in this genesis. Tests will handle them. Chains: nil, Time: json.Uint64(defaultGenesisTime.Unix()), InitialSupply: json.Uint64(360 * units.MegaAvax), diff --git a/vms/platformvm/state/models/stakers_generator_test.go b/vms/platformvm/state/models/stakers_generator_test.go index 306433dca829..ca4da550c7ec 100644 --- a/vms/platformvm/state/models/stakers_generator_test.go +++ b/vms/platformvm/state/models/stakers_generator_test.go @@ -20,6 +20,10 @@ import ( "github.com/leanovate/gopter/prop" ) +// stakerGenerator helps creating random yet reproducible state.Staker objects, which +// can be used in our property tests. +// stakerGenerator takes care of enforcing some state.Staker invariants on each and every random sample. +// TestGeneratedStakersValidity documents and verifies the enforced invariants. 
 func stakerGenerator(prio priorityType, subnet *ids.ID, nodeID *ids.NodeID) gopter.Gen {
 	return genStakerTimeData(prio).FlatMap(
 		func(v interface{}) gopter.Gen {
diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/models/stakers_ops_test.go
index 46b85fd2f3fa..df4303ffd995 100644
--- a/vms/platformvm/state/models/stakers_ops_test.go
+++ b/vms/platformvm/state/models/stakers_ops_test.go
@@ -4,7 +4,6 @@
 package models
 
 import (
-	"errors"
 	"fmt"
 	"reflect"
 	"testing"
@@ -18,8 +17,10 @@ import (
 	"github.com/leanovate/gopter/prop"
 )
 
-var errNonEmptyIteratorExpected = errors.New("expected non-empty iterator, got no elements")
-
+// TestSimpleStakersOperations checks that state.State and state.Diff conform to our stakersStorageModel.
+// TestSimpleStakersOperations tests state.State and state.Diff in isolation, over simple operations.
+// TestStateAndDiffComparisonToStorageModel carries a more involved verification over a production-like
+// mix of state.State and state.Diffs.
 func TestSimpleStakersOperations(t *testing.T) {
 	storeCreators := map[string]func() (state.Stakers, error){
 		"base state": func() (state.Stakers, error) {
diff --git a/vms/platformvm/state/models/stakers_storage_model.go b/vms/platformvm/state/models/stakers_storage_model.go
index 15b333267f73..4f4156dcfeae 100644
--- a/vms/platformvm/state/models/stakers_storage_model.go
+++ b/vms/platformvm/state/models/stakers_storage_model.go
@@ -19,6 +19,16 @@ var (
 	_ state.StakerIterator = (*stakersStorageIteratorModel)(nil)
 )
 
+// stakersStorageModel is the executable reference model of how we expect
+// P-chain state and diffs to behave with respect to stakers.
+// stakersStorageModel abstracts away the complexity related to
+// P-chain state persistence and to the state.Diff flushing mechanisms.
+// stakersStorageModel represents how we expect state.Diff and state.State to behave
+// in a single threaded environment when stakers are written to or read from them.
+// The utility of stakersStorageModel as an executable reference model is that
+// we can write automatic tests asserting that state.Diff and state.State conform
+// to stakersStorageModel.
+
 type subnetNodeKey struct {
 	subnetID ids.ID
 	nodeID   ids.NodeID
diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/models/stakers_storage_model_test.go
index 3573e0ac582d..9f1e3dcb07a8 100644
--- a/vms/platformvm/state/models/stakers_storage_model_test.go
+++ b/vms/platformvm/state/models/stakers_storage_model_test.go
@@ -26,6 +26,15 @@ var (
 	_ commands.Command = (*commitBottomStateCommand)(nil)
 )
 
+// TestStateAndDiffComparisonToStorageModel verifies that a production-like
+// system made of a stack of state.Diffs built on top of a state.State conforms to
+// our stakersStorageModel. It achieves this by:
+// 1. randomly generating a sequence of stakers writes as well as
+// some persistence operations (commit/diff apply),
+// 2. applying the sequence to both our stakersStorageModel and the production-like system.
+// 3. checking that both stakersStorageModel and the production-like system have
+// the same state after each operation.
+ func TestStateAndDiffComparisonToStorageModel(t *testing.T) { properties := gopter.NewProperties(nil) From ab56c7f76b7eab3e28a58766b2e8785b29187cf5 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 11 May 2023 18:19:46 +0200 Subject: [PATCH 028/132] nit --- ...elpers_test.go => stakers_helpers_test.go} | 13 +-- ...tor_test.go => stakers_model_generator.go} | 109 +++--------------- .../state/stakers_model_generator_test.go | 83 +++++++++++++ ...rage_model.go => stakers_model_storage.go} | 85 +++++++------- ..._test.go => stakers_model_storage_test.go} | 37 +++--- .../state/{models => }/stakers_ops_test.go | 33 +++--- 6 files changed, 184 insertions(+), 176 deletions(-) rename vms/platformvm/state/{models/helpers_test.go => stakers_helpers_test.go} (94%) rename vms/platformvm/state/{models/stakers_generator_test.go => stakers_model_generator.go} (65%) create mode 100644 vms/platformvm/state/stakers_model_generator_test.go rename vms/platformvm/state/{models/stakers_storage_model.go => stakers_model_storage.go} (61%) rename vms/platformvm/state/{models/stakers_storage_model_test.go => stakers_model_storage_test.go} (93%) rename vms/platformvm/state/{models => }/stakers_ops_test.go (93%) diff --git a/vms/platformvm/state/models/helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go similarity index 94% rename from vms/platformvm/state/models/helpers_test.go rename to vms/platformvm/state/stakers_helpers_test.go index 9b890e9ee5b5..2cc160965d46 100644 --- a/vms/platformvm/state/models/helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package models +package state import ( "errors" @@ -25,12 +25,11 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/prometheus/client_golang/prometheus" ) var ( - _ state.Versions = (*versionsHolder)(nil) + _ Versions = (*versionsHolder)(nil) testNetworkID = uint32(10) // To be used in tests xChainID = ids.Empty.Prefix(0) @@ -48,14 +47,14 @@ var ( ) type versionsHolder struct { - baseState state.State + baseState State } -func (h *versionsHolder) GetState(blkID ids.ID) (state.Chain, bool) { +func (h *versionsHolder) GetState(blkID ids.ID) (Chain, bool) { return h.baseState, blkID == h.baseState.GetLastAccepted() } -func buildChainState() (state.State, error) { +func buildChainState() (State, error) { baseDBManager := manager.NewMemDB(version.Semantic1_0_0) baseDB := versiondb.New(baseDBManager.Current().Database) @@ -73,7 +72,7 @@ func buildChainState() (state.State, error) { } rewardsCalc := reward.NewCalculator(cfg.RewardConfig) - return state.New( + return New( baseDB, genesisBytes, prometheus.NewRegistry(), diff --git a/vms/platformvm/state/models/stakers_generator_test.go b/vms/platformvm/state/stakers_model_generator.go similarity index 65% rename from vms/platformvm/state/models/stakers_generator_test.go rename to vms/platformvm/state/stakers_model_generator.go index ca4da550c7ec..f20810db37e1 100644 --- a/vms/platformvm/state/models/stakers_generator_test.go +++ b/vms/platformvm/state/stakers_model_generator.go @@ -1,30 +1,36 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package models +package state import ( - "fmt" "reflect" - "testing" "time" blst "github.com/supranational/blst/bindings/go" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/leanovate/gopter" "github.com/leanovate/gopter/gen" - "github.com/leanovate/gopter/prop" ) -// stakerGenerator helps creating random yet reproducible state.Staker objects, which -// can be used in our property tests. -// stakerGenerator takes care of enforcing some state.Staker invariants on each and every random sample. +type generatorPriorityType uint8 + +const ( + anyPriority generatorPriorityType = iota + currentValidator + currentDelegator + pendingValidator + pendingDelegator +) + +// stakerGenerator helps creating random yet reproducible Staker objects, +// which can be used in our property tests. stakerGenerator takes care of +// enforcing some Staker invariants on each and every random sample. // TestGeneratedStakersValidity documents and verifies the enforced invariants. -func stakerGenerator(prio priorityType, subnet *ids.ID, nodeID *ids.NodeID) gopter.Gen { +func stakerGenerator(prio generatorPriorityType, subnet *ids.ID, nodeID *ids.NodeID) gopter.Gen { return genStakerTimeData(prio).FlatMap( func(v interface{}) gopter.Gen { macro := v.(stakerTimeData) @@ -38,7 +44,7 @@ func stakerGenerator(prio priorityType, subnet *ids.ID, nodeID *ids.NodeID) gopt genStakerNodeID = gen.Const(*nodeID) } - return gen.Struct(reflect.TypeOf(state.Staker{}), map[string]gopter.Gen{ + return gen.Struct(reflect.TypeOf(Staker{}), map[string]gopter.Gen{ "TxID": genID, "NodeID": genStakerNodeID, "PublicKey": genBlsKey, @@ -55,73 +61,6 @@ func stakerGenerator(prio priorityType, subnet *ids.ID, nodeID *ids.NodeID) gopt ) } -func TestGeneratedStakersValidity(t *testing.T) { - properties := gopter.NewProperties(nil) - - properties.Property("EndTime never before StartTime", prop.ForAll( - func(s state.Staker) string { - if s.EndTime.Before(s.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - s.StartTime, s.EndTime, s) - } - return "" - }, - stakerGenerator(anyPriority, nil, nil), - )) - - properties.Property("NextTime coherent with priority", prop.ForAll( - func(s state.Staker) string { - switch p := s.Priority; p { - case txs.PrimaryNetworkDelegatorApricotPendingPriority, - txs.PrimaryNetworkDelegatorBanffPendingPriority, - txs.SubnetPermissionlessDelegatorPendingPriority, - txs.PrimaryNetworkValidatorPendingPriority, - txs.SubnetPermissionlessValidatorPendingPriority, - txs.SubnetPermissionedValidatorPendingPriority: - if !s.NextTime.Equal(s.StartTime) { - return fmt.Sprintf("pending staker has nextTime %v different from startTime %v, staker %v", - s.NextTime, s.StartTime, s) - } - return "" - - case txs.PrimaryNetworkDelegatorCurrentPriority, - txs.SubnetPermissionlessDelegatorCurrentPriority, - txs.PrimaryNetworkValidatorCurrentPriority, - txs.SubnetPermissionlessValidatorCurrentPriority, - txs.SubnetPermissionedValidatorCurrentPriority: - if !s.NextTime.Equal(s.EndTime) { - return fmt.Sprintf("current staker has nextTime %v different from endTime %v, staker %v", - s.NextTime, s.EndTime, s) - } - return "" - - default: - return fmt.Sprintf("priority %v unhandled in test", p) - } - }, - stakerGenerator(anyPriority, nil, nil), - )) - - subnetID := ids.GenerateTestID() - nodeID := ids.GenerateTestNodeID() - properties.Property("subnetID and 
nodeID set as specified", prop.ForAll( - func(s state.Staker) string { - if s.SubnetID != subnetID { - return fmt.Sprintf("unexpected subnetID, expected %v, got %v", - subnetID, s.SubnetID) - } - if s.NodeID != nodeID { - return fmt.Sprintf("unexpected nodeID, expected %v, got %v", - nodeID, s.NodeID) - } - return "" - }, - stakerGenerator(anyPriority, &subnetID, &nodeID), - )) - - properties.TestingRun(t) -} - // stakerTimeData holds Staker's time related data in order to generate them // while fullfilling the following constrains: // 1. EndTime >= StartTime @@ -134,7 +73,7 @@ type stakerTimeData struct { NextTime time.Time } -func genStakerTimeData(prio priorityType) gopter.Gen { +func genStakerTimeData(prio generatorPriorityType) gopter.Gen { return genStakerMicroData(prio).FlatMap( func(v interface{}) gopter.Gen { micro := v.(stakerMicroData) @@ -178,7 +117,7 @@ type stakerMicroData struct { } // genStakerMicroData is the helper to generate stakerMicroData -func genStakerMicroData(prio priorityType) gopter.Gen { +func genStakerMicroData(prio generatorPriorityType) gopter.Gen { return gen.Struct(reflect.TypeOf(&stakerMicroData{}), map[string]gopter.Gen{ "StartTime": gen.Time(), "Duration": gen.Int64Range(1, 365*24), @@ -186,17 +125,7 @@ func genStakerMicroData(prio priorityType) gopter.Gen { }) } -type priorityType uint8 - -const ( - anyPriority priorityType = iota - currentValidator - currentDelegator - pendingValidator - pendingDelegator -) - -func genPriority(p priorityType) gopter.Gen { +func genPriority(p generatorPriorityType) gopter.Gen { switch p { case anyPriority: return gen.OneConstOf( diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go new file mode 100644 index 000000000000..4ed0094f2d20 --- /dev/null +++ b/vms/platformvm/state/stakers_model_generator_test.go @@ -0,0 +1,83 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "fmt" + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" +) + +// TestGeneratedStakersValidity documents and verifies the +// invariants enforced by the staker generator +func TestGeneratedStakersValidity(t *testing.T) { + properties := gopter.NewProperties(nil) + + properties.Property("EndTime never before StartTime", prop.ForAll( + func(s Staker) string { + if s.EndTime.Before(s.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + s.StartTime, s.EndTime, s) + } + return "" + }, + stakerGenerator(anyPriority, nil, nil), + )) + + properties.Property("NextTime coherent with priority", prop.ForAll( + func(s Staker) string { + switch p := s.Priority; p { + case txs.PrimaryNetworkDelegatorApricotPendingPriority, + txs.PrimaryNetworkDelegatorBanffPendingPriority, + txs.SubnetPermissionlessDelegatorPendingPriority, + txs.PrimaryNetworkValidatorPendingPriority, + txs.SubnetPermissionlessValidatorPendingPriority, + txs.SubnetPermissionedValidatorPendingPriority: + if !s.NextTime.Equal(s.StartTime) { + return fmt.Sprintf("pending staker has nextTime %v different from startTime %v, staker %v", + s.NextTime, s.StartTime, s) + } + return "" + + case txs.PrimaryNetworkDelegatorCurrentPriority, + txs.SubnetPermissionlessDelegatorCurrentPriority, + txs.PrimaryNetworkValidatorCurrentPriority, + txs.SubnetPermissionlessValidatorCurrentPriority, + txs.SubnetPermissionedValidatorCurrentPriority: + if !s.NextTime.Equal(s.EndTime) { + return fmt.Sprintf("current staker has nextTime %v different from endTime %v, staker %v", + s.NextTime, s.EndTime, s) + } + return "" + + default: + return fmt.Sprintf("priority %v unhandled in test", p) + } + }, + stakerGenerator(anyPriority, nil, nil), + )) + + subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + properties.Property("subnetID and nodeID set as specified", prop.ForAll( + func(s Staker) string { + if s.SubnetID != subnetID { + return fmt.Sprintf("unexpected subnetID, expected %v, got %v", + subnetID, s.SubnetID) + } + if s.NodeID != nodeID { + return fmt.Sprintf("unexpected nodeID, expected %v, got %v", + nodeID, s.NodeID) + } + return "" + }, + stakerGenerator(anyPriority, &subnetID, &nodeID), + )) + + properties.TestingRun(t) +} diff --git a/vms/platformvm/state/models/stakers_storage_model.go b/vms/platformvm/state/stakers_model_storage.go similarity index 61% rename from vms/platformvm/state/models/stakers_storage_model.go rename to vms/platformvm/state/stakers_model_storage.go index 4f4156dcfeae..c83a00634b3a 100644 --- a/vms/platformvm/state/models/stakers_storage_model.go +++ b/vms/platformvm/state/stakers_model_storage.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package models +package state import ( "errors" @@ -11,22 +11,21 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/vms/platformvm/state" ) var ( - _ state.Stakers = (*stakersStorageModel)(nil) - _ state.StakerIterator = (*stakersStorageIteratorModel)(nil) + _ Stakers = (*stakersStorageModel)(nil) + _ StakerIterator = (*stakersStorageIteratorModel)(nil) ) // stakersStorageModel is the executable reference model of how we expect // P-chain state and diffs to behave with respect to stakers. 
// stakersStorageModel abstracts away the complexity related to -// P-chain state persistence and to the state.Diff flushing mechanisms. -// stakersStorageModel represents how we expect state.Diff and state.State to behave +// P-chain state persistence and to the Diff flushing mechanisms. +// stakersStorageModel represents how we expect Diff and State to behave // in a single threaded environment when stakers are written to or read from them. // The utility of stakersStorageModel as an executable reference model is that -// we can write automatic tests asserting that state.Diff and state.State conform +// we can write automatic tests asserting that Diff and State conform // to stakersStorageModel. type subnetNodeKey struct { @@ -35,31 +34,31 @@ type subnetNodeKey struct { } type stakersStorageModel struct { - currentValidators map[subnetNodeKey]*state.Staker - currentDelegators map[subnetNodeKey](map[ids.ID]*state.Staker) // -> (txID -> Staker) + currentValidators map[subnetNodeKey]*Staker + currentDelegators map[subnetNodeKey](map[ids.ID]*Staker) // -> (txID -> Staker) - pendingValidators map[subnetNodeKey]*state.Staker - pendingDelegators map[subnetNodeKey](map[ids.ID]*state.Staker) // -> (txID -> Staker) + pendingValidators map[subnetNodeKey]*Staker + pendingDelegators map[subnetNodeKey](map[ids.ID]*Staker) // -> (txID -> Staker) } func newStakersStorageModel() *stakersStorageModel { return &stakersStorageModel{ - currentValidators: make(map[subnetNodeKey]*state.Staker), - currentDelegators: make(map[subnetNodeKey]map[ids.ID]*state.Staker), - pendingValidators: make(map[subnetNodeKey]*state.Staker), - pendingDelegators: make(map[subnetNodeKey]map[ids.ID]*state.Staker), + currentValidators: make(map[subnetNodeKey]*Staker), + currentDelegators: make(map[subnetNodeKey]map[ids.ID]*Staker), + pendingValidators: make(map[subnetNodeKey]*Staker), + pendingDelegators: make(map[subnetNodeKey]map[ids.ID]*Staker), } } -func (m *stakersStorageModel) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { +func (m *stakersStorageModel) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { return getValidator(subnetID, nodeID, m.currentValidators) } -func (m *stakersStorageModel) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { +func (m *stakersStorageModel) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { return getValidator(subnetID, nodeID, m.pendingValidators) } -func getValidator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey]*state.Staker) (*state.Staker, error) { +func getValidator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey]*Staker) (*Staker, error) { key := subnetNodeKey{ subnetID: subnetID, nodeID: nodeID, @@ -71,15 +70,15 @@ func getValidator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey]* return res, nil } -func (m *stakersStorageModel) PutCurrentValidator(staker *state.Staker) { +func (m *stakersStorageModel) PutCurrentValidator(staker *Staker) { putValidator(staker, m.currentValidators) } -func (m *stakersStorageModel) PutPendingValidator(staker *state.Staker) { +func (m *stakersStorageModel) PutPendingValidator(staker *Staker) { putValidator(staker, m.pendingValidators) } -func putValidator(staker *state.Staker, domain map[subnetNodeKey]*state.Staker) { +func putValidator(staker *Staker, domain map[subnetNodeKey]*Staker) { key := subnetNodeKey{ subnetID: staker.SubnetID, nodeID: staker.NodeID, @@ -91,15 +90,15 @@ func putValidator(staker 
*state.Staker, domain map[subnetNodeKey]*state.Staker) domain[key] = staker } -func (m *stakersStorageModel) DeleteCurrentValidator(staker *state.Staker) { +func (m *stakersStorageModel) DeleteCurrentValidator(staker *Staker) { deleteValidator(staker, m.currentValidators) } -func (m *stakersStorageModel) DeletePendingValidator(staker *state.Staker) { +func (m *stakersStorageModel) DeletePendingValidator(staker *Staker) { deleteValidator(staker, m.pendingValidators) } -func deleteValidator(staker *state.Staker, domain map[subnetNodeKey]*state.Staker) { +func deleteValidator(staker *Staker, domain map[subnetNodeKey]*Staker) { key := subnetNodeKey{ subnetID: staker.SubnetID, nodeID: staker.NodeID, @@ -107,22 +106,22 @@ func deleteValidator(staker *state.Staker, domain map[subnetNodeKey]*state.Stake delete(domain, key) } -func (m *stakersStorageModel) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (state.StakerIterator, error) { +func (m *stakersStorageModel) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { return getDelegatorIterator(subnetID, nodeID, m.currentDelegators), nil } -func (m *stakersStorageModel) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (state.StakerIterator, error) { +func (m *stakersStorageModel) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { return getDelegatorIterator(subnetID, nodeID, m.pendingDelegators), nil } -func getDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey](map[ids.ID]*state.Staker)) state.StakerIterator { +func getDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey](map[ids.ID]*Staker)) StakerIterator { key := subnetNodeKey{ subnetID: subnetID, nodeID: nodeID, } dels, found := domain[key] if !found { - return state.EmptyIterator + return EmptyIterator } sortedDels := maps.Values(dels) @@ -133,15 +132,15 @@ func getDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetN } } -func (m *stakersStorageModel) PutCurrentDelegator(staker *state.Staker) { +func (m *stakersStorageModel) PutCurrentDelegator(staker *Staker) { putDelegator(staker, m.currentDelegators) } -func (m *stakersStorageModel) PutPendingDelegator(staker *state.Staker) { +func (m *stakersStorageModel) PutPendingDelegator(staker *Staker) { putDelegator(staker, m.pendingDelegators) } -func putDelegator(staker *state.Staker, domain map[subnetNodeKey]map[ids.ID]*state.Staker) { +func putDelegator(staker *Staker, domain map[subnetNodeKey]map[ids.ID]*Staker) { key := subnetNodeKey{ subnetID: staker.SubnetID, nodeID: staker.NodeID, @@ -149,21 +148,21 @@ func putDelegator(staker *state.Staker, domain map[subnetNodeKey]map[ids.ID]*sta ls, found := domain[key] if !found { - ls = make(map[ids.ID]*state.Staker) + ls = make(map[ids.ID]*Staker) domain[key] = ls } ls[staker.TxID] = staker } -func (m *stakersStorageModel) DeleteCurrentDelegator(staker *state.Staker) { +func (m *stakersStorageModel) DeleteCurrentDelegator(staker *Staker) { deleteDelegator(staker, m.currentDelegators) } -func (m *stakersStorageModel) DeletePendingDelegator(staker *state.Staker) { +func (m *stakersStorageModel) DeletePendingDelegator(staker *Staker) { deleteDelegator(staker, m.pendingDelegators) } -func deleteDelegator(staker *state.Staker, domain map[subnetNodeKey]map[ids.ID]*state.Staker) { +func deleteDelegator(staker *Staker, domain map[subnetNodeKey]map[ids.ID]*Staker) { key := subnetNodeKey{ subnetID: staker.SubnetID, nodeID: 
staker.NodeID, @@ -181,18 +180,18 @@ func deleteDelegator(staker *state.Staker, domain map[subnetNodeKey]map[ids.ID]* } } -func (m *stakersStorageModel) GetCurrentStakerIterator() (state.StakerIterator, error) { +func (m *stakersStorageModel) GetCurrentStakerIterator() (StakerIterator, error) { return getCurrentStakerIterator(m.currentValidators, m.currentDelegators), nil } -func (m *stakersStorageModel) GetPendingStakerIterator() (state.StakerIterator, error) { +func (m *stakersStorageModel) GetPendingStakerIterator() (StakerIterator, error) { return getCurrentStakerIterator(m.pendingValidators, m.pendingDelegators), nil } func getCurrentStakerIterator( - validators map[subnetNodeKey]*state.Staker, - delegators map[subnetNodeKey](map[ids.ID]*state.Staker), -) state.StakerIterator { + validators map[subnetNodeKey]*Staker, + delegators map[subnetNodeKey](map[ids.ID]*Staker), +) StakerIterator { allStakers := maps.Values(validators) for _, dels := range delegators { allStakers = append(allStakers, maps.Values(dels)...) @@ -220,13 +219,13 @@ func (*stakersStorageModel) GetDelegateeReward( } type stakersStorageIteratorModel struct { - current *state.Staker + current *Staker // sortedStakers contains the sorted list of stakers // as it should be returned by iteration. // sortedStakers must be sorted upon stakersStorageIteratorModel creation. // Stakers are evicted from sortedStakers as Value() is called. - sortedStakers []*state.Staker + sortedStakers []*Staker } func (i *stakersStorageIteratorModel) Next() bool { @@ -239,7 +238,7 @@ func (i *stakersStorageIteratorModel) Next() bool { return true } -func (i *stakersStorageIteratorModel) Value() *state.Staker { +func (i *stakersStorageIteratorModel) Value() *Staker { return i.current } diff --git a/vms/platformvm/state/models/stakers_storage_model_test.go b/vms/platformvm/state/stakers_model_storage_test.go similarity index 93% rename from vms/platformvm/state/models/stakers_storage_model_test.go rename to vms/platformvm/state/stakers_model_storage_test.go index 9f1e3dcb07a8..6a7123837295 100644 --- a/vms/platformvm/state/models/stakers_storage_model_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package models +package state import ( "fmt" @@ -10,7 +10,6 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/leanovate/gopter" "github.com/leanovate/gopter/commands" @@ -18,7 +17,7 @@ import ( ) var ( - _ state.Versions = (*sysUnderTest)(nil) + _ Versions = (*sysUnderTest)(nil) _ commands.Command = (*putCurrentValidatorCommand)(nil) _ commands.Command = (*deleteCurrentValidatorCommand)(nil) _ commands.Command = (*addTopDiffCommand)(nil) @@ -27,7 +26,7 @@ var ( ) // TestStateAndDiffComparisonToStorageModel verifies that a production-like -// system made of a stack of state.Diffs built on top of a state.State conforms to +// system made of a stack of Diffs built on top of a State conforms to // our stakersStorageModel. It achieves this by: // 1. 
randomly generating a sequence of stakers writes as well as // some persistence operations (commit/diff apply), @@ -48,21 +47,21 @@ func TestStateAndDiffComparisonToStorageModel(t *testing.T) { type sysUnderTest struct { diffBlkIDSeed uint64 - baseState state.State + baseState State sortedDiffIDs []ids.ID - diffsMap map[ids.ID]state.Diff + diffsMap map[ids.ID]Diff } -func newSysUnderTest(baseState state.State) *sysUnderTest { +func newSysUnderTest(baseState State) *sysUnderTest { sys := &sysUnderTest{ baseState: baseState, - diffsMap: map[ids.ID]state.Diff{}, + diffsMap: map[ids.ID]Diff{}, sortedDiffIDs: []ids.ID{}, } return sys } -func (s *sysUnderTest) GetState(blkID ids.ID) (state.Chain, bool) { +func (s *sysUnderTest) GetState(blkID ids.ID) (Chain, bool) { if state, found := s.diffsMap[blkID]; found { return state, found } @@ -77,7 +76,7 @@ func (s *sysUnderTest) addDiffOnTop() { } else { topBlkID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] } - newTopDiff, err := state.NewDiff(topBlkID, s) + newTopDiff, err := NewDiff(topBlkID, s) if err != nil { panic(err) } @@ -86,7 +85,7 @@ func (s *sysUnderTest) addDiffOnTop() { } // getTopChainState returns top diff or baseState -func (s *sysUnderTest) getTopChainState() state.Chain { +func (s *sysUnderTest) getTopChainState() Chain { var topChainStateID ids.ID if len(s.sortedDiffIDs) != 0 { topChainStateID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] @@ -181,10 +180,10 @@ var stakersCommands = &commands.ProtoCommands{ } // PutCurrentValidator section -type putCurrentValidatorCommand state.Staker +type putCurrentValidatorCommand Staker func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - staker := (*state.Staker)(v) + staker := (*Staker)(v) sys := sut.(*sysUnderTest) topChainState := sys.getTopChainState() topChainState.PutCurrentValidator(staker) @@ -192,7 +191,7 @@ func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands. 
}

func (v *putCurrentValidatorCommand) NextState(cmdState commands.State) commands.State {
-	staker := (*state.Staker)(v)
+	staker := (*Staker)(v)
 	cmdState.(*stakersStorageModel).PutCurrentValidator(staker)
 	return cmdState
 }
@@ -219,7 +218,7 @@ func (v *putCurrentValidatorCommand) String() string {
 }
 
 var genPutCurrentValidatorCommand = stakerGenerator(currentValidator, nil, nil).Map(
-	func(staker state.Staker) commands.Command {
+	func(staker Staker) commands.Command {
 		cmd := (*putCurrentValidatorCommand)(&staker)
 		return cmd
 	},
@@ -239,7 +238,7 @@ func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands
 	}
 	var (
 		found     = false
-		validator *state.Staker
+		validator *Staker
 	)
 	for !found && stakerIt.Next() {
 		validator = stakerIt.Value()
@@ -267,7 +266,7 @@ func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) command
 
 	var (
 		found     = false
-		validator *state.Staker
+		validator *Staker
 	)
 	for !found && stakerIt.Next() {
 		validator = stakerIt.Value()
@@ -441,13 +440,13 @@ func checkSystemAndModelContent(model *stakersStorageModel, sys *sysUnderTest) b
 		return false
 	}
 
-	modelStakers := make([]*state.Staker, 0)
+	modelStakers := make([]*Staker, 0)
 	for modelIt.Next() {
 		modelStakers = append(modelStakers, modelIt.Value())
 	}
 	modelIt.Release()
 
-	sysStakers := make([]*state.Staker, 0)
+	sysStakers := make([]*Staker, 0)
 	for sysIt.Next() {
 		sysStakers = append(sysStakers, sysIt.Value())
 	}
diff --git a/vms/platformvm/state/models/stakers_ops_test.go b/vms/platformvm/state/stakers_ops_test.go
similarity index 93%
rename from vms/platformvm/state/models/stakers_ops_test.go
rename to vms/platformvm/state/stakers_ops_test.go
index df4303ffd995..b1057bd20e7e 100644
--- a/vms/platformvm/state/models/stakers_ops_test.go
+++ b/vms/platformvm/state/stakers_ops_test.go
@@ -1,7 +1,7 @@
 // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
 // See the file LICENSE for licensing terms.
 
-package models
+package state
 
 import (
 	"fmt"
@@ -11,22 +11,21 @@ import (
 	"github.com/ava-labs/avalanchego/database"
 	"github.com/ava-labs/avalanchego/ids"
 	"github.com/ava-labs/avalanchego/utils/set"
-	"github.com/ava-labs/avalanchego/vms/platformvm/state"
 	"github.com/leanovate/gopter"
 	"github.com/leanovate/gopter/gen"
 	"github.com/leanovate/gopter/prop"
 )
 
-// TestSimpleStakersOperations checks that state.State and state.Diff conform our stakersStorageModel.
-// TestSimpleStakersOperations tests state.State and state.Diff in isolation, over simple operations.
+// TestSimpleStakersOperations checks that State and Diff conform to our stakersStorageModel.
+// TestSimpleStakersOperations tests State and Diff in isolation, over simple operations.
 // TestStateAndDiffComparisonToStorageModel carries a more involved verification over a production-like
-// mix of state.State and state.Diffs.
+// mix of State and Diffs.
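+//
+// All properties in this file follow gopter's string convention: the
+// condition function returns "" when the property holds and a failure
+// description otherwise. As a minimal, self-contained sketch of that
+// wiring (a hypothetical property, not part of this change set; assumes
+// the time package is imported):
+//
+//	func TestDurationKeepsEndAfterStart(t *testing.T) {
+//		properties := gopter.NewProperties(nil)
+//		properties.Property("end never before start", prop.ForAll(
+//			func(startUnix int64, duration uint32) string {
+//				start := time.Unix(startUnix, 0)
+//				end := start.Add(time.Duration(duration) * time.Second)
+//				if end.Before(start) {
+//					return fmt.Sprintf("endTime %v before startTime %v", end, start)
+//				}
+//				return "" // empty string means the property holds
+//			},
+//			gen.Int64Range(0, 1<<32), // bounded to keep the time arithmetic overflow-free
+//			gen.UInt32(),
+//		))
+//		properties.TestingRun(t)
+//	}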
func TestSimpleStakersOperations(t *testing.T) { - storeCreators := map[string]func() (state.Stakers, error){ - "base state": func() (state.Stakers, error) { + storeCreators := map[string]func() (Stakers, error){ + "base state": func() (Stakers, error) { return buildChainState() }, - "diff": func() (state.Stakers, error) { + "diff": func() (Stakers, error) { baseState, err := buildChainState() if err != nil { return nil, fmt.Errorf("unexpected error while creating chain base state, err %v", err) @@ -36,13 +35,13 @@ func TestSimpleStakersOperations(t *testing.T) { versions := &versionsHolder{ baseState: baseState, } - store, err := state.NewDiff(genesisID, versions) + store, err := NewDiff(genesisID, versions) if err != nil { return nil, fmt.Errorf("unexpected error while creating diff, err %v", err) } return store, nil }, - "storage model": func() (state.Stakers, error) { //nolint:golint,unparam + "storage model": func() (Stakers, error) { //nolint:golint,unparam return newStakersStorageModel(), nil }, } @@ -55,11 +54,11 @@ func TestSimpleStakersOperations(t *testing.T) { } } -func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *gopter.Properties { +func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter.Properties { properties := gopter.NewProperties(nil) properties.Property("some current validator ops", prop.ForAll( - func(s state.Staker) string { + func(s Staker) string { store, err := storeCreatorF() if err != nil { return fmt.Sprintf("unexpected error while creating staker store, err %v", err) @@ -127,7 +126,7 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g )) properties.Property("some pending validator ops", prop.ForAll( - func(s state.Staker) string { + func(s Staker) string { store, err := storeCreatorF() if err != nil { return fmt.Sprintf("unexpected error while creating staker store, err %v", err) @@ -199,7 +198,7 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g nodeID = ids.GenerateTestNodeID() ) properties.Property("some current delegators ops", prop.ForAll( - func(val state.Staker, dels []state.Staker) string { + func(val Staker, dels []Staker) string { store, err := storeCreatorF() if err != nil { return fmt.Sprintf("unexpected error while creating staker store, err %v", err) @@ -329,7 +328,7 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g stakerGenerator(currentValidator, &subnetID, &nodeID), gen.SliceOfN(10, stakerGenerator(currentDelegator, &subnetID, &nodeID)). SuchThat(func(v interface{}) bool { - stakersList := v.([]state.Staker) + stakersList := v.([]Staker) uniqueTxIDs := set.NewSet[ids.ID](len(stakersList)) for _, staker := range stakersList { uniqueTxIDs.Add(staker.TxID) @@ -341,7 +340,7 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g )) properties.Property("some pending delegators ops", prop.ForAll( - func(val state.Staker, dels []state.Staker) string { + func(val Staker, dels []Staker) string { store, err := storeCreatorF() if err != nil { return fmt.Sprintf("unexpected error while creating staker store, err %v", err) @@ -471,7 +470,7 @@ func simpleStakerStateProperties(storeCreatorF func() (state.Stakers, error)) *g stakerGenerator(currentValidator, &subnetID, &nodeID), gen.SliceOfN(10, stakerGenerator(pendingDelegator, &subnetID, &nodeID)). 
SuchThat(func(v interface{}) bool { - stakersList := v.([]state.Staker) + stakersList := v.([]Staker) uniqueTxIDs := set.NewSet[ids.ID](len(stakersList)) for _, staker := range stakersList { uniqueTxIDs.Add(staker.TxID) From 643e3b2cb5c431a236332d2f84d3790233a427c8 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 11 May 2023 18:44:02 +0200 Subject: [PATCH 029/132] nits --- vms/platformvm/state/stakers_ops_test.go | 94 ++++++++++++++---------- 1 file changed, 57 insertions(+), 37 deletions(-) diff --git a/vms/platformvm/state/stakers_ops_test.go b/vms/platformvm/state/stakers_ops_test.go index b1057bd20e7e..c15f5eb44efe 100644 --- a/vms/platformvm/state/stakers_ops_test.go +++ b/vms/platformvm/state/stakers_ops_test.go @@ -65,15 +65,28 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. } // no staker before insertion - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) // check version 1 if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } + currIT, err := store.GetCurrentStakerIterator() // check version 2 + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if currIT.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + } + currIT.Release() + // it's fine deleting unknown validator store.DeleteCurrentValidator(&s) + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) // check version 1 + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } - currIT, err := store.GetCurrentStakerIterator() + currIT, err = store.GetCurrentStakerIterator() // check version 2 if err != nil { return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) } @@ -82,9 +95,9 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. } currIT.Release() - // staker after insertion + // insert the staker and show it can be found store.PutCurrentValidator(&s) - retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) + retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) // check version 1 if err != nil { return fmt.Sprintf("expected no error, got %v", err) } @@ -92,7 +105,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) } - currIT, err = store.GetCurrentStakerIterator() + currIT, err = store.GetCurrentStakerIterator() // check version 2 if err != nil { return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) } @@ -104,14 +117,14 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. 
} currIT.Release() - // no staker after deletion + // delete the staker and show it won't be found anymore store.DeleteCurrentValidator(&s) - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) // check version 1 if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - currIT, err = store.GetCurrentStakerIterator() + currIT, err = store.GetCurrentStakerIterator() // check version 2 if err != nil { return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) } @@ -133,26 +146,39 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. } // no staker before insertion - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) // check version 1 if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } + pendIt, err := store.GetPendingStakerIterator() // check version 2 + if err != nil { + return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + } + if pendIt.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", pendIt.Value()) + } + pendIt.Release() + // it's fine deleting unknown validator store.DeletePendingValidator(&s) + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) // check version 1 + if err != database.ErrNotFound { + return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) + } - currIT, err := store.GetPendingStakerIterator() + pendIt, err = store.GetPendingStakerIterator() // check version 2 if err != nil { return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + if pendIt.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", pendIt.Value()) } - currIT.Release() + pendIt.Release() - // staker after insertion + // insert the staker and show it can be found store.PutPendingValidator(&s) - retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) // check version 1 if err != nil { return fmt.Sprintf("expected no error, got %v", err) } @@ -160,33 +186,33 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. 
return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) } - currIT, err = store.GetPendingStakerIterator() + pendIt, err = store.GetPendingStakerIterator() // check version 2 if err != nil { return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) } - if !currIT.Next() { + if !pendIt.Next() { return errNonEmptyIteratorExpected.Error() } - if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { + if !reflect.DeepEqual(pendIt.Value(), retrievedStaker) { return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) } - currIT.Release() + pendIt.Release() - // no staker after deletion + // delete the staker and show it won't be found anymore store.DeletePendingValidator(&s) - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) // check version 1 if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - currIT, err = store.GetPendingStakerIterator() + pendIt, err = store.GetPendingStakerIterator() // check version 2 if err != nil { return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + if pendIt.Next() { + return fmt.Sprintf("expected empty iterator, got at least element %v", pendIt.Value()) } - currIT.Release() + pendIt.Release() return "" }, @@ -206,9 +232,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. // store validator store.PutCurrentValidator(&val) - - // check validator - version 1 - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) // check version 1 if err != nil { return fmt.Sprintf("expected no error, got %v", err) } @@ -216,8 +240,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) } - // check validator - version 2 - valIt, err := store.GetCurrentStakerIterator() + valIt, err := store.GetCurrentStakerIterator() // check version 2 if err != nil { return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) } @@ -279,7 +302,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. } delIt.Release() - // check no missing delegators if whole staker set + // check no missing delegators in the whole staker set for _, del := range dels { found := false fullDelIt, err := store.GetCurrentStakerIterator() @@ -348,9 +371,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. // store validator store.PutCurrentValidator(&val) - - // check validator - version 1 - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) // check version 1 if err != nil { return fmt.Sprintf("expected no error, got %v", err) } @@ -358,8 +379,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. 
return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator)
 			}
 
-			// check validator - version 2
-			valIt, err := store.GetCurrentStakerIterator()
+			valIt, err := store.GetCurrentStakerIterator() // check version 2
 			if err != nil {
 				return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err)
 			}
@@ -421,7 +441,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter.
 			}
 			delIt.Release()
 
-			// check no missing delegators if whole staker set
+			// check no missing delegators in the whole staker set
 			for _, del := range dels {
 				found := false
 				fullDelIt, err := store.GetPendingStakerIterator()

From 9c4fa9a3a81c8e395acbbd44f3115180d6f07126 Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Thu, 11 May 2023 20:01:55 +0200
Subject: [PATCH 030/132] cleanup
---
 .../state/stakers_model_generator.go          |   9 +-
 .../state/stakers_model_generator_test.go     |   7 +-
 .../state/stakers_model_storage_test.go       | 219 +++++++++++++++++-
 vms/platformvm/state/stakers_ops_test.go      |  21 +-
 4 files changed, 238 insertions(+), 18 deletions(-)

diff --git a/vms/platformvm/state/stakers_model_generator.go b/vms/platformvm/state/stakers_model_generator.go
index f20810db37e1..19668c6c91a4 100644
--- a/vms/platformvm/state/stakers_model_generator.go
+++ b/vms/platformvm/state/stakers_model_generator.go
@@ -30,7 +30,12 @@ const (
 // which can be used in our property tests. stakerGenerator takes care of
 // enforcing some Staker invariants on each and every random sample.
 // TestGeneratedStakersValidity documents and verifies the enforced invariants.
-func stakerGenerator(prio generatorPriorityType, subnet *ids.ID, nodeID *ids.NodeID) gopter.Gen {
+func stakerGenerator(
+	prio generatorPriorityType,
+	subnet *ids.ID,
+	nodeID *ids.NodeID,
+	maxWeight uint64, // helps avoid overflows in delegator tests
+) gopter.Gen {
 	return genStakerTimeData(prio).FlatMap(
 		func(v interface{}) gopter.Gen {
 			macro := v.(stakerTimeData)
@@ -49,7 +54,7 @@ func stakerGenerator(
 				"NodeID":          genStakerNodeID,
 				"PublicKey":       genBlsKey,
 				"SubnetID":        genStakerSubnetID,
-				"Weight":          gen.UInt64(),
+				"Weight":          gen.UInt64Range(0, maxWeight),
 				"StartTime":       gen.Const(macro.StartTime),
 				"EndTime":         gen.Const(macro.EndTime),
 				"PotentialReward": gen.UInt64(),
diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go
index 4ed0094f2d20..e99ae84fc365 100644
--- a/vms/platformvm/state/stakers_model_generator_test.go
+++ b/vms/platformvm/state/stakers_model_generator_test.go
@@ -5,6 +5,7 @@ package state
 
 import (
 	"fmt"
+	"math"
 	"testing"
 
 	"github.com/ava-labs/avalanchego/ids"
@@ -26,7 +27,7 @@ func TestGeneratedStakersValidity(t *testing.T) {
 			}
 			return ""
 		},
-		stakerGenerator(anyPriority, nil, nil),
+		stakerGenerator(anyPriority, nil, nil, math.MaxUint64),
 	))
 
 	properties.Property("NextTime coherent with priority", prop.ForAll(
@@ -59,7 +60,7 @@ func TestGeneratedStakersValidity(t *testing.T) {
 				return fmt.Sprintf("priority %v unhandled in test", p)
 			}
 		},
-		stakerGenerator(anyPriority, nil, nil),
+		stakerGenerator(anyPriority, nil, nil, math.MaxUint64),
 	))
 
 	subnetID := ids.GenerateTestID()
@@ -76,7 +77,7 @@ func TestGeneratedStakersValidity(t *testing.T) {
 			}
 			return ""
 		},
-		stakerGenerator(anyPriority, &subnetID, &nodeID),
+		stakerGenerator(anyPriority, &subnetID, &nodeID, math.MaxUint64),
 	))
 
 	properties.TestingRun(t)
 }
diff --git a/vms/platformvm/state/stakers_model_storage_test.go
b/vms/platformvm/state/stakers_model_storage_test.go index 6a7123837295..73c341ad751f 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -5,6 +5,7 @@ package state import ( "fmt" + "math" "reflect" "sync/atomic" "testing" @@ -20,6 +21,8 @@ var ( _ Versions = (*sysUnderTest)(nil) _ commands.Command = (*putCurrentValidatorCommand)(nil) _ commands.Command = (*deleteCurrentValidatorCommand)(nil) + _ commands.Command = (*putCurrentDelegatorCommand)(nil) + _ commands.Command = (*deleteCurrentDelegatorCommand)(nil) _ commands.Command = (*addTopDiffCommand)(nil) _ commands.Command = (*applyBottomDiffCommand)(nil) _ commands.Command = (*commitBottomStateCommand)(nil) @@ -169,8 +172,9 @@ var stakersCommands = &commands.ProtoCommands{ return gen.OneGenOf( genPutCurrentValidatorCommand, genDeleteCurrentValidatorCommand, - // genPutCurrentDelegatorCommand, - // genDeleteCurrentDelegatorCommand, + + genPutCurrentDelegatorCommand, + genDeleteCurrentDelegatorCommand, genAddTopDiffCommand, genApplyBottomDiffCommand, @@ -217,7 +221,7 @@ func (v *putCurrentValidatorCommand) String() string { v.SubnetID, v.NodeID, v.TxID, v.Priority, v.StartTime.Unix(), v.EndTime.Sub(v.StartTime)) } -var genPutCurrentValidatorCommand = stakerGenerator(currentValidator, nil, nil).Map( +var genPutCurrentValidatorCommand = stakerGenerator(currentValidator, nil, nil, math.MaxUint64).Map( func(staker Staker) commands.Command { cmd := (*putCurrentValidatorCommand)(&staker) return cmd @@ -310,6 +314,215 @@ var genDeleteCurrentValidatorCommand = gen.IntRange(1, 2).Map( }, ) +// PutCurrentDelegator section +type putCurrentDelegatorCommand Staker + +func (v *putCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { + candidateDelegator := (*Staker)(v) + sys := sut.(*sysUnderTest) + err := addCurrentDelegatorInSystem(sys, candidateDelegator) + if err != nil { + panic(err) + } + return sys +} + +func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegator *Staker) error { + // 1. check if there is a current validator, already inserted. If not return + // 2. Update candidateDelegator attributes to make it delegator of selected validator + // 3. Add delegator to picked validator + chain := sys.getTopChainState() + + // 1. check if there is a current validator. If not, nothing to do + stakerIt, err := chain.GetCurrentStakerIterator() + if err != nil { + return err + } + + var ( + found = false + validator *Staker + ) + for !found && stakerIt.Next() { + validator = stakerIt.Value() + if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority || + validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || + validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { + found = true + } + } + if !found { + return nil // no current validator to add delegator to + } + stakerIt.Release() + + // 2. Add a delegator to it + delegator := candidateDelegator + delegator.SubnetID = validator.SubnetID + delegator.NodeID = validator.NodeID + + chain.PutCurrentDelegator(delegator) + return nil +} + +func (v *putCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { + candidateDelegator := (*Staker)(v) + model := cmdState.(*stakersStorageModel) + err := addCurrentDelegatorInModel(model, candidateDelegator) + if err != nil { + panic(err) + } + return cmdState +} + +func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegator *Staker) error { + // 1. 
check if there is a current validator, already inserted. If not return + // 2. Update candidateDelegator attributes to make it delegator of selected validator + // 3. Add delegator to picked validator + + // 1. check if there is a current validator. If not, nothing to do + stakerIt, err := model.GetCurrentStakerIterator() + if err != nil { + return err + } + + var ( + found = false + validator *Staker + ) + for !found && stakerIt.Next() { + validator = stakerIt.Value() + if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority || + validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || + validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { + found = true + } + } + if !found { + return nil // no current validator to add delegator to + } + stakerIt.Release() + + // 2. Add a delegator to it + delegator := candidateDelegator + delegator.SubnetID = validator.SubnetID + delegator.NodeID = validator.NodeID + + model.PutCurrentDelegator(delegator) + return nil +} + +func (*putCurrentDelegatorCommand) PreCondition(commands.State) bool { + return true +} + +func (*putCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + model := cmdState.(*stakersStorageModel) + sys := res.(*sysUnderTest) + + if checkSystemAndModelContent(model, sys) { + return &gopter.PropResult{Status: gopter.PropTrue} + } + + return &gopter.PropResult{Status: gopter.PropFalse} +} + +func (v *putCurrentDelegatorCommand) String() string { + return fmt.Sprintf("putCurrentDelegator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", + v.SubnetID, v.NodeID, v.TxID, v.Priority, v.StartTime.Unix(), v.EndTime.Sub(v.StartTime)) +} + +var genPutCurrentDelegatorCommand = stakerGenerator(currentDelegator, nil, nil, 1000).Map( + func(staker Staker) commands.Command { + cmd := (*putCurrentDelegatorCommand)(&staker) + return cmd + }, +) + +// DeleteCurrentDelegator section +type deleteCurrentDelegatorCommand struct{} + +func (*deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { + // delete first validator, if any + sys := sut.(*sysUnderTest) + topDiff := sys.getTopChainState() + + stakerIt, err := topDiff.GetCurrentStakerIterator() + if err != nil { + panic(err) + } + var ( + found = false + delegator *Staker + ) + for !found && stakerIt.Next() { + delegator = stakerIt.Value() + if delegator.Priority == txs.SubnetPermissionlessDelegatorCurrentPriority || + delegator.Priority == txs.PrimaryNetworkDelegatorCurrentPriority { + found = true + } + } + if !found { + return sys // no current validator to delete + } + stakerIt.Release() + + topDiff.DeleteCurrentDelegator(delegator) + return sys // returns sys to allow comparison with state in PostCondition +} + +func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { + model := cmdState.(*stakersStorageModel) + stakerIt, err := model.GetCurrentStakerIterator() + if err != nil { + return err + } + + var ( + found = false + delegator *Staker + ) + for !found && stakerIt.Next() { + delegator = stakerIt.Value() + if delegator.Priority == txs.SubnetPermissionlessDelegatorCurrentPriority || + delegator.Priority == txs.PrimaryNetworkDelegatorCurrentPriority { + found = true + } + } + if !found { + return cmdState // no current validator to add delegator to + } + stakerIt.Release() + + model.DeleteCurrentDelegator(delegator) + return cmdState +} + +func (*deleteCurrentDelegatorCommand) PreCondition(commands.State) 
bool { + return true +} + +func (*deleteCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + model := cmdState.(*stakersStorageModel) + sys := res.(*sysUnderTest) + + if checkSystemAndModelContent(model, sys) { + return &gopter.PropResult{Status: gopter.PropTrue} + } + + return &gopter.PropResult{Status: gopter.PropFalse} +} + +func (*deleteCurrentDelegatorCommand) String() string { + return "DeleteCurrentDelegator" +} + +var genDeleteCurrentDelegatorCommand = gen.IntRange(1, 2).Map( + func(int) commands.Command { + return &deleteCurrentDelegatorCommand{} + }, +) + // addTopDiffCommand section type addTopDiffCommand struct{} diff --git a/vms/platformvm/state/stakers_ops_test.go b/vms/platformvm/state/stakers_ops_test.go index c15f5eb44efe..6c1e2514d63e 100644 --- a/vms/platformvm/state/stakers_ops_test.go +++ b/vms/platformvm/state/stakers_ops_test.go @@ -5,6 +5,7 @@ package state import ( "fmt" + "math" "reflect" "testing" @@ -57,7 +58,7 @@ func TestSimpleStakersOperations(t *testing.T) { func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter.Properties { properties := gopter.NewProperties(nil) - properties.Property("some current validator ops", prop.ForAll( + properties.Property("add, delete and query current validators", prop.ForAll( func(s Staker) string { store, err := storeCreatorF() if err != nil { @@ -135,10 +136,10 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return "" }, - stakerGenerator(anyPriority, nil, nil), + stakerGenerator(anyPriority, nil, nil, math.MaxUint64), )) - properties.Property("some pending validator ops", prop.ForAll( + properties.Property("add, delete and query pending validators", prop.ForAll( func(s Staker) string { store, err := storeCreatorF() if err != nil { @@ -216,7 +217,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return "" }, - stakerGenerator(anyPriority, nil, nil), + stakerGenerator(anyPriority, nil, nil, math.MaxUint64), )) var ( @@ -348,8 +349,8 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return "" }, - stakerGenerator(currentValidator, &subnetID, &nodeID), - gen.SliceOfN(10, stakerGenerator(currentDelegator, &subnetID, &nodeID)). + stakerGenerator(currentValidator, &subnetID, &nodeID, math.MaxUint64), + gen.SliceOfN(10, stakerGenerator(currentDelegator, &subnetID, &nodeID, math.MaxUint64)). SuchThat(func(v interface{}) bool { stakersList := v.([]Staker) uniqueTxIDs := set.NewSet[ids.ID](len(stakersList)) @@ -357,12 +358,12 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. uniqueTxIDs.Add(staker.TxID) } - // make sure TxIDs are unique, at least among delegators + // make sure TxIDs are unique, at least among delegators. return len(stakersList) == uniqueTxIDs.Len() }), )) - properties.Property("some pending delegators ops", prop.ForAll( + properties.Property("add, delete and query pending delegators", prop.ForAll( func(val Staker, dels []Staker) string { store, err := storeCreatorF() if err != nil { @@ -487,8 +488,8 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return "" }, - stakerGenerator(currentValidator, &subnetID, &nodeID), - gen.SliceOfN(10, stakerGenerator(pendingDelegator, &subnetID, &nodeID)). + stakerGenerator(currentValidator, &subnetID, &nodeID, math.MaxUint64), + gen.SliceOfN(10, stakerGenerator(pendingDelegator, &subnetID, &nodeID, math.MaxUint64)). 
SuchThat(func(v interface{}) bool {
 				stakersList := v.([]Staker)
 				uniqueTxIDs := set.NewSet[ids.ID](len(stakersList))
 				for _, staker := range stakersList {
 					uniqueTxIDs.Add(staker.TxID)

From 273cab4ef19b83a8317ce282ef0ca2e1fe2bded6 Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Thu, 18 May 2023 15:28:27 +0200
Subject: [PATCH 031/132] nit
---
 vms/platformvm/state/stakers_helpers_test.go | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go
index 2cc160965d46..3218cab680cc 100644
--- a/vms/platformvm/state/stakers_helpers_test.go
+++ b/vms/platformvm/state/stakers_helpers_test.go
@@ -31,10 +31,9 @@ import (
 var (
 	_ Versions = (*versionsHolder)(nil)
 
-	testNetworkID = uint32(10) // To be used in tests
-	xChainID      = ids.Empty.Prefix(0)
-	cChainID      = ids.Empty.Prefix(1)
-	avaxAssetID   = ids.ID{'y', 'e', 'e', 't'}
+	xChainID    = ids.Empty.Prefix(0)
+	cChainID    = ids.Empty.Prefix(1)
+	avaxAssetID = ids.ID{'y', 'e', 'e', 't'}
 
 	defaultMinStakingDuration = 24 * time.Hour
 	defaultMaxStakingDuration = 365 * 24 * time.Hour
@@ -61,7 +60,7 @@ func buildChainState() (State, error) {
 	cfg := defaultConfig()
 
 	ctx := snow.DefaultContextTest()
-	ctx.NetworkID = testNetworkID
+	ctx.NetworkID = constants.UnitTestID
 	ctx.XChainID = xChainID
 	ctx.CChainID = cChainID
 	ctx.AVAXAssetID = avaxAssetID
@@ -115,7 +114,7 @@ func defaultConfig() *config.Config {
 func buildGenesisTest(ctx *snow.Context) ([]byte, error) {
 	buildGenesisArgs := api.BuildGenesisArgs{
-		NetworkID:     json.Uint32(testNetworkID),
+		NetworkID:     json.Uint32(constants.UnitTestID),
 		AvaxAssetID:   ctx.AVAXAssetID,
 		UTXOs:         nil, // no UTXOs in this genesis. Not relevant to package tests.
 		Validators:    nil, // no validators in this genesis. Tests will handle them.

From 1e104db14b466378fd18579d9d9d4527771ee31a Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Wed, 24 May 2023 09:08:16 +0200
Subject: [PATCH 032/132] nits
---
 vms/platformvm/state/stakers_model_generator.go    |  4 ++--
 .../state/stakers_model_generator_test.go          |  7 +++----
 vms/platformvm/state/stakers_model_storage.go      |  2 +-
 .../state/stakers_model_storage_test.go            | 14 ++++++++------
 4 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/vms/platformvm/state/stakers_model_generator.go b/vms/platformvm/state/stakers_model_generator.go
index 19668c6c91a4..9c1a410b0ef1 100644
--- a/vms/platformvm/state/stakers_model_generator.go
+++ b/vms/platformvm/state/stakers_model_generator.go
@@ -67,7 +67,7 @@ func stakerGenerator(
 }
 
 // stakerTimeData holds Staker's time related data in order to generate them
-// while fullfilling the following constrains:
+// while fulfilling the following constraints:
 // 1. EndTime >= StartTime
 // 2. NextTime == EndTime for current priorities
 // 3.
NextTime == StartTime for pending priorities
@@ -200,7 +200,7 @@ var genID = gen.SliceOfN(lengthID, gen.UInt8()).FlatMap(
 	reflect.TypeOf([]byte{}),
 )
 
-// genID is the helper generator for ids.NodeID objects
+// genNodeID is the helper generator for ids.NodeID objects
 var genNodeID = gen.SliceOfN(lengthNodeID, gen.UInt8()).FlatMap(
 	func(v interface{}) gopter.Gen {
 		byteSlice := v.([]byte)
diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go
index e99ae84fc365..6b9661d6c070 100644
--- a/vms/platformvm/state/stakers_model_generator_test.go
+++ b/vms/platformvm/state/stakers_model_generator_test.go
@@ -14,8 +14,8 @@ import (
 	"github.com/leanovate/gopter/prop"
 )
 
-// TestGeneratedStakersValidity documents and verifies the
-// invariants enforced by the staker generator
+// TestGeneratedStakersValidity tests the staker generator itself.
+// It documents and verifies the invariants enforced by the staker generator.
 func TestGeneratedStakersValidity(t *testing.T) {
 	properties := gopter.NewProperties(nil)
 
diff --git a/vms/platformvm/state/stakers_model_storage.go b/vms/platformvm/state/stakers_model_storage.go
index c83a00634b3a..f7f3cdc6def6 100644
--- a/vms/platformvm/state/stakers_model_storage.go
+++ b/vms/platformvm/state/stakers_model_storage.go
@@ -224,7 +224,7 @@ type stakersStorageIteratorModel struct {
 	// sortedStakers contains the sorted list of stakers
 	// as it should be returned by iteration.
 	// sortedStakers must be sorted upon stakersStorageIteratorModel creation.
-	// Stakers are evicted from sortedStakers as Value() is called.
+	// Stakers are evicted from sortedStakers as Next() is called.
 	sortedStakers []*Staker
 }
 
diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go
index 73c341ad751f..8c99504f9d77 100644
--- a/vms/platformvm/state/stakers_model_storage_test.go
+++ b/vms/platformvm/state/stakers_model_storage_test.go
@@ -240,6 +240,8 @@ func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands
 	if err != nil {
 		panic(err)
 	}
+	defer stakerIt.Release()
+
 	var (
 		found     = false
 		validator *Staker
@@ -255,7 +257,6 @@ func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands
 		}
 	}
 	if !found {
 		return sys // no current validator to delete
 	}
-	stakerIt.Release()
 
 	topDiff.DeleteCurrentValidator(validator)
 	return sys // returns sys to allow comparison with state in PostCondition
@@ -267,6 +268,7 @@ func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) command
 	if err != nil {
 		return err
 	}
+	defer stakerIt.Release()
 
 	var (
 		found     = false
@@ -283,7 +285,6 @@ func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) command
 		}
 	}
 	if !found {
 		return cmdState // no current validator to add delegator to
 	}
-	stakerIt.Release()
 
 	model.DeleteCurrentValidator(validator)
 	return cmdState
@@ -339,6 +340,7 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegator *Staker)
 	if err != nil {
 		return err
 	}
+	defer stakerIt.Release()
 
 	var (
 		found     = false
@@ -354,7 +356,6 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegator *Staker)
 		}
 	}
 	if !found {
 		return nil // no current validator to add delegator to
 	}
-	stakerIt.Release()
 
 	// 2.
Add a delegator to it delegator := candidateDelegator @@ -385,6 +386,7 @@ func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegator * if err != nil { return err } + defer stakerIt.Release() var ( found = false @@ -401,7 +403,6 @@ func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegator * if !found { return nil // no current validator to add delegator to } - stakerIt.Release() // 2. Add a delegator to it delegator := candidateDelegator @@ -451,6 +452,8 @@ func (*deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands if err != nil { panic(err) } + defer stakerIt.Release() + var ( found = false delegator *Staker @@ -465,7 +468,6 @@ func (*deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands if !found { return sys // no current validator to delete } - stakerIt.Release() topDiff.DeleteCurrentDelegator(delegator) return sys // returns sys to allow comparison with state in PostCondition @@ -477,6 +479,7 @@ func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) command if err != nil { return err } + defer stakerIt.Release() var ( found = false @@ -492,7 +495,6 @@ func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) command if !found { return cmdState // no current validator to add delegator to } - stakerIt.Release() model.DeleteCurrentDelegator(delegator) return cmdState From d6d0a835246ea824ea8ad129886f6d29a448bba2 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 24 May 2023 09:45:54 +0200 Subject: [PATCH 033/132] nits --- .../state/stakers_model_storage_test.go | 18 ++++++++++++------ vms/platformvm/state/stakers_ops_test.go | 2 +- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index 8c99504f9d77..7c238273a8ee 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -240,7 +240,6 @@ func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands if err != nil { panic(err) } - defer stakerIt.Release() var ( found = false @@ -255,8 +254,10 @@ func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands } } if !found { + stakerIt.Release() return sys // no current validator to delete } + stakerIt.Release() // release before modifying stakers collection topDiff.DeleteCurrentValidator(validator) return sys // returns sys to allow comparison with state in PostCondition @@ -268,7 +269,6 @@ func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) command if err != nil { return err } - defer stakerIt.Release() var ( found = false @@ -283,8 +283,10 @@ func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) command } } if !found { + stakerIt.Release() return cmdState // no current validator to add delegator to } + stakerIt.Release() // release before modifying stakers collection model.DeleteCurrentValidator(validator) return cmdState @@ -339,7 +341,6 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegator *Staker) if err != nil { return err } - defer stakerIt.Release() var ( found = false @@ -354,8 +355,10 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegator *Staker) } } if !found { + stakerIt.Release() return nil // no current validator to add delegator to } + stakerIt.Release() // release before modifying stakers collection // 2. 
Add a delegator to it delegator := candidateDelegator @@ -386,7 +389,6 @@ func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegator * if err != nil { return err } - defer stakerIt.Release() var ( found = false @@ -401,8 +403,10 @@ func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegator * } } if !found { + stakerIt.Release() return nil // no current validator to add delegator to } + stakerIt.Release() // release before modifying stakers collection // 2. Add a delegator to it delegator := candidateDelegator @@ -452,7 +456,6 @@ func (*deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands if err != nil { panic(err) } - defer stakerIt.Release() var ( found = false @@ -466,8 +469,10 @@ func (*deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands } } if !found { + stakerIt.Release() return sys // no current validator to delete } + stakerIt.Release() // release before modifying stakers collection topDiff.DeleteCurrentDelegator(delegator) return sys // returns sys to allow comparison with state in PostCondition @@ -479,7 +484,6 @@ func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) command if err != nil { return err } - defer stakerIt.Release() var ( found = false @@ -493,8 +497,10 @@ func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) command } } if !found { + stakerIt.Release() return cmdState // no current validator to add delegator to } + stakerIt.Release() // release before modifying stakers collection model.DeleteCurrentDelegator(delegator) return cmdState diff --git a/vms/platformvm/state/stakers_ops_test.go b/vms/platformvm/state/stakers_ops_test.go index 6c1e2514d63e..d4d571a47213 100644 --- a/vms/platformvm/state/stakers_ops_test.go +++ b/vms/platformvm/state/stakers_ops_test.go @@ -224,7 +224,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. subnetID = ids.GenerateTestID() nodeID = ids.GenerateTestNodeID() ) - properties.Property("some current delegators ops", prop.ForAll( + properties.Property("add, delete and query current delegators", prop.ForAll( func(val Staker, dels []Staker) string { store, err := storeCreatorF() if err != nil { From e8ad209085b22c51b6189a0c2309770e8008d93a Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 24 May 2023 10:26:34 +0200 Subject: [PATCH 034/132] reduced code duplication in UTs --- vms/platformvm/state/stakers_ops_test.go | 127 +++++++++++------------ 1 file changed, 60 insertions(+), 67 deletions(-) diff --git a/vms/platformvm/state/stakers_ops_test.go b/vms/platformvm/state/stakers_ops_test.go index d4d571a47213..97f09c78289f 100644 --- a/vms/platformvm/state/stakers_ops_test.go +++ b/vms/platformvm/state/stakers_ops_test.go @@ -70,15 +70,10 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - - currIT, err := store.GetCurrentStakerIterator() // check version 2 + err = checkCurrentStakersContent(store, []Staker{}) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + return err.Error() } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) - } - currIT.Release() // it's fine deleting unknown validator store.DeleteCurrentValidator(&s) @@ -86,15 +81,10 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. 
if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - - currIT, err = store.GetCurrentStakerIterator() // check version 2 + err = checkCurrentStakersContent(store, []Staker{}) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + return err.Error() } - currIT.Release() // insert the staker and show it can be found store.PutCurrentValidator(&s) @@ -105,18 +95,10 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. if !reflect.DeepEqual(&s, retrievedStaker) { return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) } - - currIT, err = store.GetCurrentStakerIterator() // check version 2 + err = checkCurrentStakersContent(store, []Staker{s}) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !currIT.Next() { - return errNonEmptyIteratorExpected.Error() + return err.Error() } - if !reflect.DeepEqual(currIT.Value(), retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) - } - currIT.Release() // delete the staker and show it won't be found anymore store.DeleteCurrentValidator(&s) @@ -124,15 +106,10 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - - currIT, err = store.GetCurrentStakerIterator() // check version 2 + err = checkCurrentStakersContent(store, []Staker{}) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if currIT.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", currIT.Value()) + return err.Error() } - currIT.Release() return "" }, @@ -240,18 +217,10 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. if !reflect.DeepEqual(&val, retrievedValidator) { return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) } - - valIt, err := store.GetCurrentStakerIterator() // check version 2 + err = checkCurrentStakersContent(store, []Staker{val}) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + return err.Error() } - if !valIt.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - valIt.Release() // store delegators for _, del := range dels { @@ -304,23 +273,11 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. 
delIt.Release() // check no missing delegators in the whole staker set - for _, del := range dels { - found := false - fullDelIt, err := store.GetCurrentStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for fullDelIt.Next() { - if reflect.DeepEqual(*fullDelIt.Value(), del) { - found = true - break - } - } - fullDelIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } + stakersSet := dels + stakersSet = append(stakersSet, val) + err = checkCurrentStakersContent(store, stakersSet) + if err != nil { + return err.Error() } // delete delegators @@ -380,17 +337,10 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) } - valIt, err := store.GetCurrentStakerIterator() // check version 2 + err = checkCurrentStakersContent(store, []Staker{val}) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) + return err.Error() } - if !valIt.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(valIt.Value(), retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - valIt.Release() // store delegators for _, del := range dels { @@ -504,3 +454,46 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return properties } + +// verify wheter store contains exactly the stakers specify in the list. +// stakers order does not matter. Also stakers get consumes while checking +func checkCurrentStakersContent(store Stakers, stakers []Staker) error { + currIT, err := store.GetCurrentStakerIterator() + if err != nil { + return fmt.Errorf("unexpected failure in staker iterator creation, error %v", err) + } + defer currIT.Release() + + if len(stakers) == 0 { + if currIT.Next() { + return fmt.Errorf("expected empty iterator, got at least element %v", currIT.Value()) + } + return nil + } + + for currIT.Next() { + var ( + staker = currIT.Value() + found = false + + retrievedStakerIdx = 0 + ) + + for idx, s := range stakers { + if reflect.DeepEqual(staker, &s) { + retrievedStakerIdx = idx + found = true + } + } + if !found { + return fmt.Errorf("found extra staker %v", staker) + } + stakers[retrievedStakerIdx] = stakers[len(stakers)-1] // order does not matter + stakers = stakers[:len(stakers)-1] + } + + if len(stakers) != 0 { + return fmt.Errorf("missing stakers") + } + return nil +} From 7ef586d036d7a14be095e553edc5059f6a8173c3 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 24 May 2023 10:42:28 +0200 Subject: [PATCH 035/132] nits --- vms/platformvm/state/stakers_helpers_test.go | 6 +- vms/platformvm/state/stakers_ops_test.go | 126 ++++++++----------- 2 files changed, 55 insertions(+), 77 deletions(-) diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index 3218cab680cc..90321c52c609 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -4,7 +4,6 @@ package state import ( - "errors" "fmt" "time" @@ -42,9 +41,12 @@ var ( defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) defaultTxFee = uint64(100) - errNonEmptyIteratorExpected = errors.New("expected non-empty iterator, got no elements") + pending stakerStatus = 0 + current stakerStatus = 1 ) +type stakerStatus int + 
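+// The pending and current values above, together with the int-backed
+// stakerStatus type this patch introduces, let a single content-checking
+// helper serve both staker sets. A sketch of the dispatch they enable,
+// assuming this package's Stakers and StakerIterator interfaces
+// (iteratorFor is a hypothetical name, not part of the patch):
+//
+//	func iteratorFor(store Stakers, status stakerStatus) (StakerIterator, error) {
+//		switch status {
+//		case current:
+//			return store.GetCurrentStakerIterator()
+//		case pending:
+//			return store.GetPendingStakerIterator()
+//		default:
+//			return nil, fmt.Errorf("unknown staker status %d", status)
+//		}
+//	}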
type versionsHolder struct { baseState State } diff --git a/vms/platformvm/state/stakers_ops_test.go b/vms/platformvm/state/stakers_ops_test.go index 97f09c78289f..78d23f9d4e0f 100644 --- a/vms/platformvm/state/stakers_ops_test.go +++ b/vms/platformvm/state/stakers_ops_test.go @@ -4,6 +4,7 @@ package state import ( + "errors" "fmt" "math" "reflect" @@ -66,47 +67,47 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. } // no staker before insertion - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) // check version 1 + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkCurrentStakersContent(store, []Staker{}) + err = checkStakersContent(store, []Staker{}, current) if err != nil { return err.Error() } // it's fine deleting unknown validator store.DeleteCurrentValidator(&s) - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) // check version 1 + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkCurrentStakersContent(store, []Staker{}) + err = checkStakersContent(store, []Staker{}, current) if err != nil { return err.Error() } // insert the staker and show it can be found store.PutCurrentValidator(&s) - retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) // check version 1 + retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) if err != nil { return fmt.Sprintf("expected no error, got %v", err) } if !reflect.DeepEqual(&s, retrievedStaker) { return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) } - err = checkCurrentStakersContent(store, []Staker{s}) + err = checkStakersContent(store, []Staker{s}, current) if err != nil { return err.Error() } // delete the staker and show it won't be found anymore store.DeleteCurrentValidator(&s) - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) // check version 1 + _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkCurrentStakersContent(store, []Staker{}) + err = checkStakersContent(store, []Staker{}, current) if err != nil { return err.Error() } @@ -124,73 +125,50 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. 
} // no staker before insertion - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) // check version 1 + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - - pendIt, err := store.GetPendingStakerIterator() // check version 2 + err = checkStakersContent(store, []Staker{}, pending) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if pendIt.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", pendIt.Value()) + return err.Error() } - pendIt.Release() // it's fine deleting unknown validator store.DeletePendingValidator(&s) - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) // check version 1 + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - - pendIt, err = store.GetPendingStakerIterator() // check version 2 + err = checkStakersContent(store, []Staker{}, pending) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if pendIt.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", pendIt.Value()) + return err.Error() } - pendIt.Release() // insert the staker and show it can be found store.PutPendingValidator(&s) - retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) // check version 1 + retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) if err != nil { return fmt.Sprintf("expected no error, got %v", err) } if !reflect.DeepEqual(&s, retrievedStaker) { return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) } - - pendIt, err = store.GetPendingStakerIterator() // check version 2 + err = checkStakersContent(store, []Staker{s}, pending) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if !pendIt.Next() { - return errNonEmptyIteratorExpected.Error() - } - if !reflect.DeepEqual(pendIt.Value(), retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + return err.Error() } - pendIt.Release() // delete the staker and show it won't be found anymore store.DeletePendingValidator(&s) - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) // check version 1 + _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - - pendIt, err = store.GetPendingStakerIterator() // check version 2 + err = checkStakersContent(store, []Staker{}, pending) if err != nil { - return fmt.Sprintf("unexpected failure in staker iterator creation, error %v", err) - } - if pendIt.Next() { - return fmt.Sprintf("expected empty iterator, got at least element %v", pendIt.Value()) + return err.Error() } - pendIt.Release() return "" }, @@ -210,14 +188,14 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. 
// store validator store.PutCurrentValidator(&val) - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) // check version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) if err != nil { return fmt.Sprintf("expected no error, got %v", err) } if !reflect.DeepEqual(&val, retrievedValidator) { return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) } - err = checkCurrentStakersContent(store, []Staker{val}) + err = checkStakersContent(store, []Staker{val}, current) if err != nil { return err.Error() } @@ -275,7 +253,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. // check no missing delegators in the whole staker set stakersSet := dels stakersSet = append(stakersSet, val) - err = checkCurrentStakersContent(store, stakersSet) + err = checkStakersContent(store, stakersSet, current) if err != nil { return err.Error() } @@ -329,7 +307,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. // store validator store.PutCurrentValidator(&val) - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) // check version 1 + retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) if err != nil { return fmt.Sprintf("expected no error, got %v", err) } @@ -337,7 +315,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) } - err = checkCurrentStakersContent(store, []Staker{val}) + err = checkStakersContent(store, []Staker{val}, current) if err != nil { return err.Error() } @@ -393,23 +371,9 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. delIt.Release() // check no missing delegators in the whole staker set - for _, del := range dels { - found := false - fullDelIt, err := store.GetPendingStakerIterator() - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for fullDelIt.Next() { - if reflect.DeepEqual(*fullDelIt.Value(), del) { - found = true - break - } - } - fullDelIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } + err = checkStakersContent(store, dels, pending) + if err != nil { + return err.Error() } // delete delegators @@ -455,32 +419,44 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter. return properties } -// verify wheter store contains exactly the stakers specify in the list. +// verify whether store contains exactly the stakers specify in the list. // stakers order does not matter. 
Also stakers get consumes while checking -func checkCurrentStakersContent(store Stakers, stakers []Staker) error { - currIT, err := store.GetCurrentStakerIterator() +func checkStakersContent(store Stakers, stakers []Staker, stakersType stakerStatus) error { + var ( + it StakerIterator + err error + ) + + switch stakersType { + case current: + it, err = store.GetCurrentStakerIterator() + case pending: + it, err = store.GetPendingStakerIterator() + default: + return errors.New("Unhandled stakers status") + } if err != nil { return fmt.Errorf("unexpected failure in staker iterator creation, error %v", err) } - defer currIT.Release() + defer it.Release() if len(stakers) == 0 { - if currIT.Next() { - return fmt.Errorf("expected empty iterator, got at least element %v", currIT.Value()) + if it.Next() { + return fmt.Errorf("expected empty iterator, got at least element %v", it.Value()) } return nil } - for currIT.Next() { + for it.Next() { var ( - staker = currIT.Value() + staker = it.Value() found = false retrievedStakerIdx = 0 ) for idx, s := range stakers { - if reflect.DeepEqual(staker, &s) { + if reflect.DeepEqual(*staker, s) { retrievedStakerIdx = idx found = true } @@ -493,7 +469,7 @@ func checkCurrentStakersContent(store Stakers, stakers []Staker) error { } if len(stakers) != 0 { - return fmt.Errorf("missing stakers") + return errors.New("missing stakers") } return nil } From 33b7dd9433b715b20ec5017c0ee8b68f2db8cbf9 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 1 Jun 2023 17:52:08 +0200 Subject: [PATCH 036/132] UT nit --- vms/platformvm/state/stakers_model_storage_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index 7c238273a8ee..03adf5d1380a 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -251,6 +251,7 @@ func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { found = true + break } } if !found { @@ -280,6 +281,7 @@ func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) command validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { found = true + break } } if !found { @@ -352,6 +354,7 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegator *Staker) validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { found = true + break } } if !found { @@ -400,6 +403,7 @@ func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegator * validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { found = true + break } } if !found { @@ -466,6 +470,7 @@ func (*deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands if delegator.Priority == txs.SubnetPermissionlessDelegatorCurrentPriority || delegator.Priority == txs.PrimaryNetworkDelegatorCurrentPriority { found = true + break } } if !found { @@ -494,6 +499,7 @@ func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) command if delegator.Priority == txs.SubnetPermissionlessDelegatorCurrentPriority || delegator.Priority == 
txs.PrimaryNetworkDelegatorCurrentPriority { found = true + break } } if !found { From c5941a8b98802b7c4c446fa2a9a72b6b0889350a Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 14 Jun 2023 12:09:35 +0200 Subject: [PATCH 037/132] appease linter --- vms/platformvm/state/stakers_helpers_test.go | 3 ++- vms/platformvm/state/stakers_model_generator.go | 4 ++-- vms/platformvm/state/stakers_model_generator_test.go | 5 +++-- vms/platformvm/state/stakers_model_storage_test.go | 5 +++-- vms/platformvm/state/stakers_ops_test.go | 7 ++++--- 5 files changed, 14 insertions(+), 10 deletions(-) diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index 90321c52c609..39d276041907 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -7,6 +7,8 @@ import ( "fmt" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/versiondb" @@ -24,7 +26,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/prometheus/client_golang/prometheus" ) var ( diff --git a/vms/platformvm/state/stakers_model_generator.go b/vms/platformvm/state/stakers_model_generator.go index 9c1a410b0ef1..84f029d0292f 100644 --- a/vms/platformvm/state/stakers_model_generator.go +++ b/vms/platformvm/state/stakers_model_generator.go @@ -7,13 +7,13 @@ import ( "reflect" "time" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" blst "github.com/supranational/blst/bindings/go" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" ) type generatorPriorityType uint8 diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go index 6b9661d6c070..7b5af90bfbc7 100644 --- a/vms/platformvm/state/stakers_model_generator_test.go +++ b/vms/platformvm/state/stakers_model_generator_test.go @@ -8,10 +8,11 @@ import ( "math" "testing" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/leanovate/gopter" "github.com/leanovate/gopter/prop" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) // TestGeneratedStakersValidity tests the staker generator itself. 
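The import rewrites in this "appease linter" commit all converge on the grouping the repository's linter enforces: standard-library packages first, external modules second, avalanchego's own packages last, with a blank line between groups. A sketch of the resulting shape, with an illustrative selection of imports:

import (
	"fmt"
	"testing"

	"github.com/leanovate/gopter"

	"github.com/ava-labs/avalanchego/ids"
)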
diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index 03adf5d1380a..47c2dd15400f 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -10,11 +10,12 @@ import ( "sync/atomic" "testing" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/leanovate/gopter" "github.com/leanovate/gopter/commands" "github.com/leanovate/gopter/gen" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) var ( diff --git a/vms/platformvm/state/stakers_ops_test.go b/vms/platformvm/state/stakers_ops_test.go index 78d23f9d4e0f..d918ff947b25 100644 --- a/vms/platformvm/state/stakers_ops_test.go +++ b/vms/platformvm/state/stakers_ops_test.go @@ -10,12 +10,13 @@ import ( "reflect" "testing" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" "github.com/leanovate/gopter" "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) // TestSimpleStakersOperations checks that State and Diff conform our stakersStorageModel. From 59de7f15d12868d2a85e9d350dc5bc55b9c621ba Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Mon, 19 Jun 2023 14:50:55 +0200 Subject: [PATCH 038/132] nit --- .../state/{stakers_ops_test.go => stakers_properties_test.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename vms/platformvm/state/{stakers_ops_test.go => stakers_properties_test.go} (100%) diff --git a/vms/platformvm/state/stakers_ops_test.go b/vms/platformvm/state/stakers_properties_test.go similarity index 100% rename from vms/platformvm/state/stakers_ops_test.go rename to vms/platformvm/state/stakers_properties_test.go From 73a8117d364ee0827ce850a966ccf1146c5f2100 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Mon, 19 Jun 2023 15:26:38 +0200 Subject: [PATCH 039/132] nit --- .../state/stakers_model_storage_test.go | 96 +++++++------------ 1 file changed, 32 insertions(+), 64 deletions(-) diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index 47c2dd15400f..80a4f7ff394f 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -147,6 +147,9 @@ var stakersCommands = &commands.ProtoCommands{ baseState.PutPendingDelegator(staker) } } + if err := baseState.Commit(); err != nil { + panic(err) + } return newSysUnderTest(baseState) }, @@ -158,8 +161,8 @@ var stakersCommands = &commands.ProtoCommands{ panic(err) } }, - // Note: using gen.Const(newStakersStorageModel()) would not recreated model - // among calls. Hence just use a dummy generated with sole purpose of recreating model + // a trick to force command regeneration at each sampling. 
+ // gen.Const would not allow it InitialStateGen: gen.IntRange(1, 2).Map( func(int) *stakersStorageModel { return newStakersStorageModel() @@ -207,14 +210,7 @@ func (*putCurrentValidatorCommand) PreCondition(commands.State) bool { } func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - model := cmdState.(*stakersStorageModel) - sys := res.(*sysUnderTest) - - if checkSystemAndModelContent(model, sys) { - return &gopter.PropResult{Status: gopter.PropTrue} - } - - return &gopter.PropResult{Status: gopter.PropFalse} + return checkSystemAndModelContent(cmdState, res) } func (v *putCurrentValidatorCommand) String() string { @@ -296,24 +292,20 @@ func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) command } func (*deleteCurrentValidatorCommand) PreCondition(commands.State) bool { + // We allow deleting an un-existing validator return true } func (*deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - model := cmdState.(*stakersStorageModel) - sys := res.(*sysUnderTest) - - if checkSystemAndModelContent(model, sys) { - return &gopter.PropResult{Status: gopter.PropTrue} - } - - return &gopter.PropResult{Status: gopter.PropFalse} + return checkSystemAndModelContent(cmdState, res) } func (*deleteCurrentValidatorCommand) String() string { return "DeleteCurrentValidator" } +// a trick to force command regeneration at each sampling. +// gen.Const would not allow it var genDeleteCurrentValidatorCommand = gen.IntRange(1, 2).Map( func(int) commands.Command { return &deleteCurrentValidatorCommand{} @@ -427,14 +419,7 @@ func (*putCurrentDelegatorCommand) PreCondition(commands.State) bool { } func (*putCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - model := cmdState.(*stakersStorageModel) - sys := res.(*sysUnderTest) - - if checkSystemAndModelContent(model, sys) { - return &gopter.PropResult{Status: gopter.PropTrue} - } - - return &gopter.PropResult{Status: gopter.PropFalse} + return checkSystemAndModelContent(cmdState, res) } func (v *putCurrentDelegatorCommand) String() string { @@ -518,20 +503,15 @@ func (*deleteCurrentDelegatorCommand) PreCondition(commands.State) bool { } func (*deleteCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - model := cmdState.(*stakersStorageModel) - sys := res.(*sysUnderTest) - - if checkSystemAndModelContent(model, sys) { - return &gopter.PropResult{Status: gopter.PropTrue} - } - - return &gopter.PropResult{Status: gopter.PropFalse} + return checkSystemAndModelContent(cmdState, res) } func (*deleteCurrentDelegatorCommand) String() string { return "DeleteCurrentDelegator" } +// a trick to force command regeneration at each sampling. 
+// gen.Const would not allow it var genDeleteCurrentDelegatorCommand = gen.IntRange(1, 2).Map( func(int) commands.Command { return &deleteCurrentDelegatorCommand{} @@ -556,20 +536,15 @@ func (*addTopDiffCommand) PreCondition(commands.State) bool { } func (*addTopDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - model := cmdState.(*stakersStorageModel) - sys := res.(*sysUnderTest) - - if checkSystemAndModelContent(model, sys) { - return &gopter.PropResult{Status: gopter.PropTrue} - } - - return &gopter.PropResult{Status: gopter.PropFalse} + return checkSystemAndModelContent(cmdState, res) } func (*addTopDiffCommand) String() string { return "AddTopDiffCommand" } +// a trick to force command regeneration at each sampling. +// gen.Const would not allow it var genAddTopDiffCommand = gen.IntRange(1, 2).Map( func(int) commands.Command { return &addTopDiffCommand{} @@ -594,20 +569,15 @@ func (*applyBottomDiffCommand) PreCondition(commands.State) bool { } func (*applyBottomDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - model := cmdState.(*stakersStorageModel) - sys := res.(*sysUnderTest) - - if checkSystemAndModelContent(model, sys) { - return &gopter.PropResult{Status: gopter.PropTrue} - } - - return &gopter.PropResult{Status: gopter.PropFalse} + return checkSystemAndModelContent(cmdState, res) } func (*applyBottomDiffCommand) String() string { return "ApplyBottomDiffCommand" } +// a trick to force command regeneration at each sampling. +// gen.Const would not allow it var genApplyBottomDiffCommand = gen.IntRange(1, 2).Map( func(int) commands.Command { return &applyBottomDiffCommand{} @@ -635,37 +605,35 @@ func (*commitBottomStateCommand) PreCondition(commands.State) bool { } func (*commitBottomStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - model := cmdState.(*stakersStorageModel) - sys := res.(*sysUnderTest) - - if checkSystemAndModelContent(model, sys) { - return &gopter.PropResult{Status: gopter.PropTrue} - } - - return &gopter.PropResult{Status: gopter.PropFalse} + return checkSystemAndModelContent(cmdState, res) } func (*commitBottomStateCommand) String() string { return "CommitBottomStateCommand" } +// a trick to force command regeneration at each sampling. 
+// gen.Const would not allow it var genCommitBottomStateCommand = gen.IntRange(1, 2).Map( func(int) commands.Command { return &commitBottomStateCommand{} }, ) -func checkSystemAndModelContent(model *stakersStorageModel, sys *sysUnderTest) bool { +func checkSystemAndModelContent(cmdState commands.State, res commands.Result) *gopter.PropResult { + model := cmdState.(*stakersStorageModel) + sys := res.(*sysUnderTest) + // top view content must always match model content topDiff := sys.getTopChainState() modelIt, err := model.GetCurrentStakerIterator() if err != nil { - return false + return &gopter.PropResult{Status: gopter.PropFalse} } sysIt, err := topDiff.GetCurrentStakerIterator() if err != nil { - return false + return &gopter.PropResult{Status: gopter.PropFalse} } modelStakers := make([]*Staker, 0) @@ -681,15 +649,15 @@ func checkSystemAndModelContent(model *stakersStorageModel, sys *sysUnderTest) b sysIt.Release() if len(modelStakers) != len(sysStakers) { - return false + return &gopter.PropResult{Status: gopter.PropFalse} } for idx, modelStaker := range modelStakers { sysStaker := sysStakers[idx] if modelStaker == nil || sysStaker == nil || !reflect.DeepEqual(modelStaker, sysStaker) { - return false + return &gopter.PropResult{Status: gopter.PropFalse} } } - return true + return &gopter.PropResult{Status: gopter.PropTrue} } From 5fbeab98a231aa5713d7287495a7fa9a000332a5 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Mon, 19 Jun 2023 18:44:40 +0200 Subject: [PATCH 040/132] nits --- vms/platformvm/blocks/executor/acceptor.go | 20 +++++-- vms/platformvm/state/stakers_helpers_test.go | 3 +- .../state/stakers_model_storage_test.go | 2 +- .../state/stakers_properties_test.go | 53 ++++++++++--------- 4 files changed, 49 insertions(+), 29 deletions(-) diff --git a/vms/platformvm/blocks/executor/acceptor.go b/vms/platformvm/blocks/executor/acceptor.go index 54adc602545e..af0783877dd0 100644 --- a/vms/platformvm/blocks/executor/acceptor.go +++ b/vms/platformvm/blocks/executor/acceptor.go @@ -154,7 +154,11 @@ func (a *acceptor) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { // Update the state to reflect the changes made in [onAcceptState]. if err := blkState.onAcceptState.Apply(a.state); err != nil { - return err + return fmt.Errorf( + "failed to apply accept state for block %s: %w", + blkID, + err, + ) } defer a.state.Abort() @@ -239,9 +243,15 @@ func (a *acceptor) optionBlock(b, parent blocks.Block) error { if !ok { return fmt.Errorf("%w %s", errMissingBlockState, blkID) } + if err := blkState.onAcceptState.Apply(a.state); err != nil { - return err + return fmt.Errorf( + "failed to apply accept state for block %s: %w", + blkID, + err, + ) } + return a.state.Commit() } @@ -280,7 +290,11 @@ func (a *acceptor) standardBlock(b blocks.Block) error { // Update the state to reflect the changes made in [onAcceptState]. 
 	if err := blkState.onAcceptState.Apply(a.state); err != nil {
-		return err
+		return fmt.Errorf(
+			"failed to apply accept state for block %s: %w",
+			blkID,
+			err,
+		)
 	}
 	defer a.state.Abort()
diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go
index 39d276041907..311fbcbafbf9 100644
--- a/vms/platformvm/state/stakers_helpers_test.go
+++ b/vms/platformvm/state/stakers_helpers_test.go
@@ -56,11 +56,12 @@ func (h *versionsHolder) GetState(blkID ids.ID) (Chain, bool) {
 	return h.baseState, blkID == h.baseState.GetLastAccepted()
 }
 
-func buildChainState() (State, error) {
+func buildChainState(trackedSubnets []ids.ID) (State, error) {
 	baseDBManager := manager.NewMemDB(version.Semantic1_0_0)
 	baseDB := versiondb.New(baseDBManager.Current().Database)
 
 	cfg := defaultConfig()
+	cfg.TrackedSubnets.Add(trackedSubnets...)
 
 	ctx := snow.DefaultContextTest()
 	ctx.NetworkID = constants.UnitTestID
diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go
index 80a4f7ff394f..00eb74d595a2 100644
--- a/vms/platformvm/state/stakers_model_storage_test.go
+++ b/vms/platformvm/state/stakers_model_storage_test.go
@@ -125,7 +125,7 @@ func (s *sysUnderTest) flushBottomDiff() bool {
 var stakersCommands = &commands.ProtoCommands{
 	NewSystemUnderTestFunc: func(initialState commands.State) commands.SystemUnderTest {
 		model := initialState.(*stakersStorageModel)
-		baseState, err := buildChainState()
+		baseState, err := buildChainState(nil)
 		if err != nil {
 			panic(err)
 		}
diff --git a/vms/platformvm/state/stakers_properties_test.go b/vms/platformvm/state/stakers_properties_test.go
index d918ff947b25..8a8b4fafe18c 100644
--- a/vms/platformvm/state/stakers_properties_test.go
+++ b/vms/platformvm/state/stakers_properties_test.go
@@ -19,30 +19,18 @@ import (
 	"github.com/ava-labs/avalanchego/utils/set"
 )
 
-// TestSimpleStakersOperations checks that State and Diff conform our stakersStorageModel.
-// TestSimpleStakersOperations tests State and Diff in isolation, over simple operations.
+// TestGeneralStakerContainersProperties checks that State and Diff conform to our stakersStorageModel.
+// TestGeneralStakerContainersProperties tests State and Diff in isolation, over simple operations.
 // TestStateAndDiffComparisonToStorageModel carries a more involved verification over a production-like
 // mix of State and Diffs.
-func TestSimpleStakersOperations(t *testing.T) {
+func TestGeneralStakerContainersProperties(t *testing.T) {
 	storeCreators := map[string]func() (Stakers, error){
 		"base state": func() (Stakers, error) {
-			return buildChainState()
+			return buildChainState(nil)
 		},
 		"diff": func() (Stakers, error) {
-			baseState, err := buildChainState()
-			if err != nil {
-				return nil, fmt.Errorf("unexpected error while creating chain base state, err %v", err)
-			}
-
-			genesisID := baseState.GetLastAccepted()
-			versions := &versionsHolder{
-				baseState: baseState,
-			}
-			store, err := NewDiff(genesisID, versions)
-			if err != nil {
-				return nil, fmt.Errorf("unexpected error while creating diff, err %v", err)
-			}
-			return store, nil
+			diff, _, err := buildDiffOnTopOfBaseState(nil)
+			return diff, err
 		},
 		"storage model": func() (Stakers, error) { //nolint:golint,unparam
 			return newStakersStorageModel(), nil
@@ -51,13 +39,13 @@ func TestSimpleStakersOperations(t *testing.T) {
 	for storeType, storeCreatorF := range storeCreators {
 		t.Run(storeType, func(t *testing.T) {
-			properties := simpleStakerStateProperties(storeCreatorF)
+			properties := generalStakerContainersProperties(storeCreatorF)
 			properties.TestingRun(t)
 		})
 	}
 }
 
-func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter.Properties {
+func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *gopter.Properties {
 	properties := gopter.NewProperties(nil)
 
 	properties.Property("add, delete and query current validators", prop.ForAll(
@@ -102,7 +90,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter.
 			return err.Error()
 		}
 
-			// delete the staker and show it won't be found anymore
+			// delete the staker and show it's not found anymore
 			store.DeleteCurrentValidator(&s)
 			_, err = store.GetCurrentValidator(s.SubnetID, s.NodeID)
 			if err != database.ErrNotFound {
@@ -160,7 +148,7 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter.
 			return err.Error()
 		}
 
-			// delete the staker and show it won't be found anymore
+			// delete the staker and show it's not found anymore
 			store.DeletePendingValidator(&s)
 			_, err = store.GetPendingValidator(s.SubnetID, s.NodeID)
 			if err != database.ErrNotFound {
@@ -420,8 +408,25 @@ func simpleStakerStateProperties(storeCreatorF func() (Stakers, error)) *gopter.
 	return properties
 }
 
+func buildDiffOnTopOfBaseState(trackedSubnets []ids.ID) (Diff, State, error) {
+	baseState, err := buildChainState(trackedSubnets)
+	if err != nil {
+		return nil, nil, fmt.Errorf("unexpected error while creating chain base state, err %v", err)
+	}
+
+	genesisID := baseState.GetLastAccepted()
+	versions := &versionsHolder{
+		baseState: baseState,
+	}
+	diff, err := NewDiff(genesisID, versions)
+	if err != nil {
+		return nil, nil, fmt.Errorf("unexpected error while creating diff, err %v", err)
+	}
+	return diff, baseState, nil
+}
+
-// verify whether store contains exactly the stakers specify in the list.
-// stakers order does not matter. Also stakers get consumes while checking
+// [checkStakersContent] verifies whether store contains exactly the stakers specified in the list.
+// stakers order does not matter. stakers slice gets consumed while checking.
func checkStakersContent(store Stakers, stakers []Staker, stakersType stakerStatus) error { var ( it StakerIterator From 628b5c6f1e2d4d8565773369f6b84e75803da172 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Tue, 20 Jun 2023 10:20:31 +0200 Subject: [PATCH 041/132] wip: restructuring stakers generators --- vms/platformvm/state/stakers_helpers_test.go | 16 +- .../state/stakers_model_generator.go | 212 --------- .../stakers_model_generator_check_test.go | 223 ++++++++++ .../state/stakers_model_generator_test.go | 410 +++++++++++++++--- .../state/stakers_model_storage_test.go | 106 +++-- .../state/stakers_properties_test.go | 232 ++++++---- 6 files changed, 818 insertions(+), 381 deletions(-) delete mode 100644 vms/platformvm/state/stakers_model_generator.go create mode 100644 vms/platformvm/state/stakers_model_generator_check_test.go diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index 311fbcbafbf9..0c268c88b7fd 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -56,6 +56,16 @@ func (h *versionsHolder) GetState(blkID ids.ID) (Chain, bool) { return h.baseState, blkID == h.baseState.GetLastAccepted() } +func buildStateCtx() *snow.Context { + ctx := snow.DefaultContextTest() + ctx.NetworkID = constants.UnitTestID + ctx.XChainID = xChainID + ctx.CChainID = cChainID + ctx.AVAXAssetID = avaxAssetID + + return ctx +} + func buildChainState(trackedSubnets []ids.ID) (State, error) { baseDBManager := manager.NewMemDB(version.Semantic1_0_0) baseDB := versiondb.New(baseDBManager.Current().Database) @@ -63,11 +73,7 @@ func buildChainState(trackedSubnets []ids.ID) (State, error) { cfg := defaultConfig() cfg.TrackedSubnets.Add(trackedSubnets...) - ctx := snow.DefaultContextTest() - ctx.NetworkID = constants.UnitTestID - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID + ctx := buildStateCtx() genesisBytes, err := buildGenesisTest(ctx) if err != nil { diff --git a/vms/platformvm/state/stakers_model_generator.go b/vms/platformvm/state/stakers_model_generator.go deleted file mode 100644 index 84f029d0292f..000000000000 --- a/vms/platformvm/state/stakers_model_generator.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "reflect" - "time" - - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" - blst "github.com/supranational/blst/bindings/go" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -type generatorPriorityType uint8 - -const ( - anyPriority generatorPriorityType = iota - currentValidator - currentDelegator - pendingValidator - pendingDelegator -) - -// stakerGenerator helps creating random yet reproducible Staker objects, -// which can be used in our property tests. stakerGenerator takes care of -// enforcing some Staker invariants on each and every random sample. -// TestGeneratedStakersValidity documents and verifies the enforced invariants. 
-func stakerGenerator( - prio generatorPriorityType, - subnet *ids.ID, - nodeID *ids.NodeID, - maxWeight uint64, // helps avoiding overflows in delegator tests -) gopter.Gen { - return genStakerTimeData(prio).FlatMap( - func(v interface{}) gopter.Gen { - macro := v.(stakerTimeData) - - genStakerSubnetID := genID - genStakerNodeID := genNodeID - if subnet != nil { - genStakerSubnetID = gen.Const(*subnet) - } - if nodeID != nil { - genStakerNodeID = gen.Const(*nodeID) - } - - return gen.Struct(reflect.TypeOf(Staker{}), map[string]gopter.Gen{ - "TxID": genID, - "NodeID": genStakerNodeID, - "PublicKey": genBlsKey, - "SubnetID": genStakerSubnetID, - "Weight": gen.UInt64Range(0, maxWeight), - "StartTime": gen.Const(macro.StartTime), - "EndTime": gen.Const(macro.EndTime), - "PotentialReward": gen.UInt64(), - "NextTime": gen.Const(macro.NextTime), - "Priority": gen.Const(macro.Priority), - }) - }, - reflect.TypeOf(stakerTimeData{}), - ) -} - -// stakerTimeData holds Staker's time related data in order to generate them -// while fulfilling the following constrains: -// 1. EndTime >= StartTime -// 2. NextTime == EndTime for current priorities -// 3. NextTime == StartTime for pending priorities -type stakerTimeData struct { - StartTime time.Time - EndTime time.Time - Priority txs.Priority - NextTime time.Time -} - -func genStakerTimeData(prio generatorPriorityType) gopter.Gen { - return genStakerMicroData(prio).FlatMap( - func(v interface{}) gopter.Gen { - micro := v.(stakerMicroData) - - var ( - startTime = micro.StartTime - endTime = micro.StartTime.Add(time.Duration(micro.Duration * int64(time.Hour))) - priority = micro.Priority - ) - - startTimeGen := gen.Const(startTime) - endTimeGen := gen.Const(endTime) - priorityGen := gen.Const(priority) - var nextTimeGen gopter.Gen - if priority == txs.SubnetPermissionedValidatorCurrentPriority || - priority == txs.SubnetPermissionlessDelegatorCurrentPriority || - priority == txs.SubnetPermissionlessValidatorCurrentPriority || - priority == txs.PrimaryNetworkDelegatorCurrentPriority || - priority == txs.PrimaryNetworkValidatorCurrentPriority { - nextTimeGen = gen.Const(endTime) - } else { - nextTimeGen = gen.Const(startTime) - } - - return gen.Struct(reflect.TypeOf(stakerTimeData{}), map[string]gopter.Gen{ - "StartTime": startTimeGen, - "EndTime": endTimeGen, - "Priority": priorityGen, - "NextTime": nextTimeGen, - }) - }, - reflect.TypeOf(stakerMicroData{}), - ) -} - -// stakerMicroData holds seed attributes to generate stakerMacroData -type stakerMicroData struct { - StartTime time.Time - Duration int64 - Priority txs.Priority -} - -// genStakerMicroData is the helper to generate stakerMicroData -func genStakerMicroData(prio generatorPriorityType) gopter.Gen { - return gen.Struct(reflect.TypeOf(&stakerMicroData{}), map[string]gopter.Gen{ - "StartTime": gen.Time(), - "Duration": gen.Int64Range(1, 365*24), - "Priority": genPriority(prio), - }) -} - -func genPriority(p generatorPriorityType) gopter.Gen { - switch p { - case anyPriority: - return gen.OneConstOf( - txs.PrimaryNetworkDelegatorApricotPendingPriority, - txs.PrimaryNetworkValidatorPendingPriority, - txs.PrimaryNetworkDelegatorBanffPendingPriority, - txs.SubnetPermissionlessValidatorPendingPriority, - txs.SubnetPermissionlessDelegatorPendingPriority, - txs.SubnetPermissionedValidatorPendingPriority, - txs.SubnetPermissionedValidatorCurrentPriority, - txs.SubnetPermissionlessDelegatorCurrentPriority, - txs.SubnetPermissionlessValidatorCurrentPriority, - txs.PrimaryNetworkDelegatorCurrentPriority, - 
txs.PrimaryNetworkValidatorCurrentPriority,
-		)
-	case currentValidator:
-		return gen.OneConstOf(
-			txs.SubnetPermissionedValidatorCurrentPriority,
-			txs.SubnetPermissionlessValidatorCurrentPriority,
-			txs.PrimaryNetworkValidatorCurrentPriority,
-		)
-	case currentDelegator:
-		return gen.OneConstOf(
-			txs.SubnetPermissionlessDelegatorCurrentPriority,
-			txs.PrimaryNetworkDelegatorCurrentPriority,
-		)
-	case pendingValidator:
-		return gen.OneConstOf(
-			txs.PrimaryNetworkValidatorPendingPriority,
-			txs.SubnetPermissionlessValidatorPendingPriority,
-			txs.SubnetPermissionedValidatorPendingPriority,
-		)
-	case pendingDelegator:
-		return gen.OneConstOf(
-			txs.PrimaryNetworkDelegatorApricotPendingPriority,
-			txs.PrimaryNetworkDelegatorBanffPendingPriority,
-			txs.SubnetPermissionlessDelegatorPendingPriority,
-		)
-	default:
-		panic("unhandled priority type")
-	}
-}
-
-var genBlsKey = gen.SliceOfN(lengthID, gen.UInt8()).FlatMap(
-	func(v interface{}) gopter.Gen {
-		byteSlice := v.([]byte)
-		sk := blst.KeyGen(byteSlice)
-		pk := bls.PublicFromSecretKey(sk)
-		return gen.Const(pk)
-	},
-	reflect.TypeOf([]byte{}),
-)
-
-const (
-	lengthID     = 32
-	lengthNodeID = 20
-)
-
-// genID is the helper generator for ids.ID objects
-var genID = gen.SliceOfN(lengthID, gen.UInt8()).FlatMap(
-	func(v interface{}) gopter.Gen {
-		byteSlice := v.([]byte)
-		var byteArray [lengthID]byte
-		copy(byteArray[:], byteSlice)
-		return gen.Const(ids.ID(byteArray))
-	},
-	reflect.TypeOf([]byte{}),
-)
-
-// genNodeID is the helper generator for ids.NodeID objects
-var genNodeID = gen.SliceOfN(lengthNodeID, gen.UInt8()).FlatMap(
-	func(v interface{}) gopter.Gen {
-		byteSlice := v.([]byte)
-		var byteArray [lengthNodeID]byte
-		copy(byteArray[:], byteSlice)
-		return gen.Const(ids.NodeID(byteArray))
-	},
-	reflect.TypeOf([]byte{}),
-)
diff --git a/vms/platformvm/state/stakers_model_generator_check_test.go b/vms/platformvm/state/stakers_model_generator_check_test.go
new file mode 100644
index 000000000000..f8b6468539ad
--- /dev/null
+++ b/vms/platformvm/state/stakers_model_generator_check_test.go
@@ -0,0 +1,223 @@
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package state
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/leanovate/gopter"
+	"github.com/leanovate/gopter/prop"
+
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/vms/platformvm/signer"
+	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
+)
+
+var (
+	errNotAStakerTx = errors.New("tx is not a stakerTx")
+	errWrongNodeID  = errors.New("unexpected nodeID")
+)
+
+// TestGeneratedStakersValidity tests the staker generator itself.
+// It documents and verifies the invariants enforced by the staker generator.
+func TestGeneratedStakersValidity(t *testing.T) { + properties := gopter.NewProperties(nil) + + ctx := buildStateCtx() + subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + + properties.Property("AddValidatorTx generator checks", prop.ForAll( + func(nonInitTx *txs.Tx) string { + signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx, %w", err)) + } + + if err := signedTx.SyntacticVerify(ctx); err != nil { + return err.Error() + } + + addValTx, ok := signedTx.Unsigned.(*txs.AddValidatorTx) + if !ok { + return errNotAStakerTx.Error() + } + + if nodeID != addValTx.NodeID() { + return errWrongNodeID.Error() + } + + currentVal, err := NewCurrentStaker(signedTx.ID(), addValTx, uint64(100)) + if err != nil { + return err.Error() + } + + if currentVal.EndTime.Before(currentVal.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + currentVal.StartTime, currentVal.EndTime, currentVal) + } + + pendingVal, err := NewPendingStaker(signedTx.ID(), addValTx) + if err != nil { + return err.Error() + } + + if pendingVal.EndTime.Before(pendingVal.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + pendingVal.StartTime, pendingVal.EndTime, pendingVal) + } + + return "" + }, + addValidatorTxGenerator(ctx, &nodeID), + )) + + properties.Property("AddDelegatorTx generator checks", prop.ForAll( + func(nonInitTx *txs.Tx) string { + signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx, %w", err)) + } + + if err := signedTx.SyntacticVerify(ctx); err != nil { + return err.Error() + } + + addDelTx, ok := signedTx.Unsigned.(*txs.AddDelegatorTx) + if !ok { + return errNotAStakerTx.Error() + } + + if nodeID != addDelTx.NodeID() { + return errWrongNodeID.Error() + } + + currentDel, err := NewCurrentStaker(signedTx.ID(), addDelTx, uint64(100)) + if err != nil { + return err.Error() + } + + if currentDel.EndTime.Before(currentDel.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + currentDel.StartTime, currentDel.EndTime, currentDel) + } + + pendingDel, err := NewPendingStaker(signedTx.ID(), addDelTx) + if err != nil { + return err.Error() + } + + if pendingDel.EndTime.Before(pendingDel.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + pendingDel.StartTime, pendingDel.EndTime, pendingDel) + } + + return "" + }, + addDelegatorTxGenerator(ctx, &nodeID, math.MaxUint64), + )) + + properties.Property("addPermissionlessValidatorTx generator checks", prop.ForAll( + func(nonInitTx *txs.Tx) string { + signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx, %w", err)) + } + + if err := signedTx.SyntacticVerify(ctx); err != nil { + return err.Error() + } + + addValTx, ok := signedTx.Unsigned.(*txs.AddPermissionlessValidatorTx) + if !ok { + return errNotAStakerTx.Error() + } + + if nodeID != addValTx.NodeID() { + return errWrongNodeID.Error() + } + + if subnetID != addValTx.SubnetID() { + return "subnet not duly set" + } + + currentVal, err := NewCurrentStaker(signedTx.ID(), addValTx, uint64(100)) + if err != nil { + return err.Error() + } + + if currentVal.EndTime.Before(currentVal.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + currentVal.StartTime, currentVal.EndTime, currentVal) + } + + pendingVal, err := NewPendingStaker(signedTx.ID(), 
addValTx) + if err != nil { + return err.Error() + } + + if pendingVal.EndTime.Before(pendingVal.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + pendingVal.StartTime, pendingVal.EndTime, pendingVal) + } + + return "" + }, + addPermissionlessValidatorTxGenerator(ctx, &subnetID, &nodeID, &signer.Empty{}), + )) + + properties.Property("addPermissionlessDelegatorTx generator checks", prop.ForAll( + func(nonInitTx *txs.Tx) string { + signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx, %w", err)) + } + + if err := signedTx.SyntacticVerify(ctx); err != nil { + return err.Error() + } + + addDelTx, ok := signedTx.Unsigned.(*txs.AddPermissionlessDelegatorTx) + if !ok { + return errNotAStakerTx.Error() + } + + if nodeID != addDelTx.NodeID() { + return errWrongNodeID.Error() + } + + if subnetID != addDelTx.SubnetID() { + return "subnet not duly set" + } + + currentDel, err := NewCurrentStaker(signedTx.ID(), addDelTx, uint64(100)) + if err != nil { + return err.Error() + } + + if currentDel.EndTime.Before(currentDel.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + currentDel.StartTime, currentDel.EndTime, currentDel) + } + + pendingDel, err := NewPendingStaker(signedTx.ID(), addDelTx) + if err != nil { + return err.Error() + } + + if pendingDel.EndTime.Before(pendingDel.StartTime) { + return fmt.Sprintf("startTime %v not before endTime %v, staker %v", + pendingDel.StartTime, pendingDel.EndTime, pendingDel) + } + + return "" + }, + addPermissionlessDelegatorTxGenerator(ctx, &subnetID, &nodeID, math.MaxUint64), + )) + + properties.TestingRun(t) +} diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go index 7b5af90bfbc7..38faac813dec 100644 --- a/vms/platformvm/state/stakers_model_generator_test.go +++ b/vms/platformvm/state/stakers_model_generator_test.go @@ -6,80 +6,368 @@ package state import ( "fmt" "math" - "testing" + "reflect" + "time" "github.com/leanovate/gopter" - "github.com/leanovate/gopter/prop" + "github.com/leanovate/gopter/gen" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -// TestGeneratedStakersValidity tests the staker generator itself. -// It documents and verifies theinvariants enforced by the staker generator. -func TestGeneratedStakersValidity(t *testing.T) { - properties := gopter.NewProperties(nil) +type generatorPriorityType uint8 - properties.Property("EndTime never before StartTime", prop.ForAll( - func(s Staker) string { - if s.EndTime.Before(s.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - s.StartTime, s.EndTime, s) +const ( + permissionlessValidator generatorPriorityType = iota + permissionedValidator + permissionlessDelegator + permissionedDelegator +) + +// TODO ABENEGIA: complete +// stakerTxGenerator helps creating random yet reproducible Staker objects, +// which can be used in our property tests. stakerTxGenerator takes care of +// enforcing some Staker invariants on each and every random sample. +// TestGeneratedStakersValidity documents and verifies the enforced invariants. 
+func stakerTxGenerator(
+	ctx *snow.Context,
+	priority generatorPriorityType,
+	subnetID *ids.ID,
+	nodeID *ids.NodeID,
+	blsSigner signer.Signer,
+	maxWeight uint64, // helps avoid overflows in delegator tests
+) gopter.Gen {
+	switch priority {
+	case permissionedValidator:
+		return addValidatorTxGenerator(ctx, nodeID)
+	case permissionedDelegator:
+		return addDelegatorTxGenerator(ctx, nodeID, maxWeight)
+	case permissionlessValidator:
+		return addPermissionlessValidatorTxGenerator(ctx, subnetID, nodeID, blsSigner)
+	case permissionlessDelegator:
+		return addPermissionlessDelegatorTxGenerator(ctx, subnetID, nodeID, maxWeight)
+	default:
+		panic(fmt.Sprintf("unhandled tx priority %v", priority))
+	}
+}
+
+func addPermissionlessValidatorTxGenerator(
+	ctx *snow.Context,
+	subnetID *ids.ID,
+	nodeID *ids.NodeID,
+	blsSigner signer.Signer,
+) gopter.Gen {
+	return validatorTxGenerator(nodeID, math.MaxUint64).FlatMap(
+		func(v interface{}) gopter.Gen {
+			genStakerSubnetID := genID
+			if subnetID != nil {
+				genStakerSubnetID = gen.Const(*subnetID)
+			}
+			validatorTx := v.(txs.Validator)
+
+			specificGen := gen.StructPtr(reflect.TypeOf(&txs.AddPermissionlessValidatorTx{}), map[string]gopter.Gen{
+				"BaseTx": gen.Const(txs.BaseTx{
+					BaseTx: avax.BaseTx{
+						NetworkID: ctx.NetworkID,
+						BlockchainID: ctx.ChainID,
+						Ins: []*avax.TransferableInput{},
+						Outs: []*avax.TransferableOutput{},
+					},
+				}),
+				"Validator": gen.Const(validatorTx),
+				"Subnet": genStakerSubnetID,
+				"Signer": gen.Const(blsSigner),
+				"StakeOuts": gen.Const([]*avax.TransferableOutput{
+					{
+						Asset: avax.Asset{
+							ID: ctx.AVAXAssetID,
+						},
+						Out: &secp256k1fx.TransferOutput{
+							Amt: validatorTx.Weight(),
+						},
+					},
+				}),
+				"ValidatorRewardsOwner": gen.Const(
+					&secp256k1fx.OutputOwners{
+						Addrs: []ids.ShortID{},
+					},
+				),
+				"DelegatorRewardsOwner": gen.Const(
+					&secp256k1fx.OutputOwners{
+						Addrs: []ids.ShortID{},
+					},
+				),
+				"DelegationShares": gen.UInt32Range(0, reward.PercentDenominator),
+			})
+
+			return specificGen.FlatMap(
+				func(v interface{}) gopter.Gen {
+					stakerTx := v.(*txs.AddPermissionlessValidatorTx)
+
+					if err := stakerTx.SyntacticVerify(ctx); err != nil {
+						panic(fmt.Errorf("failed syntax verification in tx generator, %w", err))
+					}
+
+					// Note: we don't sign the tx here, since we want the freedom to modify
+					// the stakerTx just before testing while avoiding the wrong txID.
+					// We use txs.Tx as a box to return a txs.StakerTx interface.
+					sTx := &txs.Tx{Unsigned: stakerTx}
+
+					return gen.Const(sTx)
+				},
+				reflect.TypeOf(&txs.AddPermissionlessValidatorTx{}),
+			)
+		},
+		reflect.TypeOf(&txs.AddPermissionlessValidatorTx{}),
+	)
+}
+
+func addValidatorTxGenerator(
+	ctx *snow.Context,
+	nodeID *ids.NodeID,
+) gopter.Gen {
+	return validatorTxGenerator(nodeID, math.MaxUint64).FlatMap(
+		func(v interface{}) gopter.Gen {
+			validatorTx := v.(txs.Validator)
+
+			specificGen := gen.StructPtr(reflect.TypeOf(&txs.AddValidatorTx{}), map[string]gopter.Gen{
+				"BaseTx": gen.Const(txs.BaseTx{
+					BaseTx: avax.BaseTx{
+						NetworkID: ctx.NetworkID,
+						BlockchainID: ctx.ChainID,
+						Ins: []*avax.TransferableInput{},
+						Outs: []*avax.TransferableOutput{},
+					},
+				}),
+				"Validator": gen.Const(validatorTx),
+				"StakeOuts": gen.Const([]*avax.TransferableOutput{
+					{
+						Asset: avax.Asset{
+							ID: ctx.AVAXAssetID,
+						},
+						Out: &secp256k1fx.TransferOutput{
+							Amt: validatorTx.Weight(),
+						},
+					},
+				}),
+				"RewardsOwner": gen.Const(
+					&secp256k1fx.OutputOwners{
+						Addrs: []ids.ShortID{},
+					},
+				),
+				"DelegationShares": gen.UInt32Range(0, reward.PercentDenominator),
+			})
+
+			return specificGen.FlatMap(
+				func(v interface{}) gopter.Gen {
+					stakerTx := v.(*txs.AddValidatorTx)
+
+					if err := stakerTx.SyntacticVerify(ctx); err != nil {
+						panic(fmt.Errorf("failed syntax verification in tx generator, %w", err))
+					}
+
+					// Note: we don't sign the tx here, since we want the freedom to modify
+					// the stakerTx just before testing while avoiding the wrong txID.
+					// We use txs.Tx as a box to return a txs.StakerTx interface.
+					sTx := &txs.Tx{Unsigned: stakerTx}
+
+					return gen.Const(sTx)
+				},
+				reflect.TypeOf(&txs.AddValidatorTx{}),
+			)
+		},
+		reflect.TypeOf(txs.Validator{}),
+	)
+}
+
+func addPermissionlessDelegatorTxGenerator(
+	ctx *snow.Context,
+	subnetID *ids.ID,
+	nodeID *ids.NodeID,
+	maxWeight uint64, // helps avoid overflows in delegator tests
+) gopter.Gen {
+	return validatorTxGenerator(nodeID, maxWeight).FlatMap(
+		func(v interface{}) gopter.Gen {
+			genStakerSubnetID := genID
+			if subnetID != nil {
+				genStakerSubnetID = gen.Const(*subnetID)
+			}
+
+			validatorTx := v.(txs.Validator)
+			specificGen := gen.StructPtr(reflect.TypeOf(txs.AddPermissionlessDelegatorTx{}), map[string]gopter.Gen{
+				"BaseTx": gen.Const(txs.BaseTx{
+					BaseTx: avax.BaseTx{
+						NetworkID: ctx.NetworkID,
+						BlockchainID: ctx.ChainID,
+						Ins: []*avax.TransferableInput{},
+						Outs: []*avax.TransferableOutput{},
+					},
+				}),
+				"Validator": gen.Const(validatorTx),
+				"Subnet": genStakerSubnetID,
+				"StakeOuts": gen.Const([]*avax.TransferableOutput{
+					{
+						Asset: avax.Asset{
+							ID: ctx.AVAXAssetID,
+						},
+						Out: &secp256k1fx.TransferOutput{
+							Amt: validatorTx.Weight(),
+						},
+					},
+				}),
+				"DelegationRewardsOwner": gen.Const(
+					&secp256k1fx.OutputOwners{
+						Addrs: []ids.ShortID{},
+					},
+				),
+			})
+
+			return specificGen.FlatMap(
+				func(v interface{}) gopter.Gen {
+					stakerTx := v.(*txs.AddPermissionlessDelegatorTx)
+
+					if err := stakerTx.SyntacticVerify(ctx); err != nil {
+						panic(fmt.Errorf("failed syntax verification in tx generator, %w", err))
+					}
+
+					// Note: we don't sign the tx here, since we want the freedom to modify
+					// the stakerTx just before testing while avoiding the wrong txID.
+					// We use txs.Tx as a box to return a txs.StakerTx interface.
+					sTx := &txs.Tx{Unsigned: stakerTx}
+
+					return gen.Const(sTx)
+				},
+				reflect.TypeOf(&txs.AddPermissionlessDelegatorTx{}),
+			)
+		},
+		reflect.TypeOf(txs.Validator{}),
+	)
+}
+
+func addDelegatorTxGenerator(
+	ctx *snow.Context,
+	nodeID *ids.NodeID,
+	maxWeight uint64, // helps avoid overflows in delegator tests
+) gopter.Gen {
+	return validatorTxGenerator(nodeID, maxWeight).FlatMap(
+		func(v interface{}) gopter.Gen {
+			validatorTx := v.(txs.Validator)
+			specificGen := gen.StructPtr(reflect.TypeOf(txs.AddDelegatorTx{}), map[string]gopter.Gen{
+				"BaseTx": gen.Const(txs.BaseTx{
+					BaseTx: avax.BaseTx{
+						NetworkID: ctx.NetworkID,
+						BlockchainID: ctx.ChainID,
+						Ins: []*avax.TransferableInput{},
+						Outs: []*avax.TransferableOutput{},
+					},
+				}),
+				"Validator": gen.Const(validatorTx),
+				"StakeOuts": gen.Const([]*avax.TransferableOutput{
+					{
+						Asset: avax.Asset{
+							ID: ctx.AVAXAssetID,
+						},
+						Out: &secp256k1fx.TransferOutput{
+							Amt: validatorTx.Weight(),
+						},
+					},
+				}),
+				"DelegationRewardsOwner": gen.Const(
+					&secp256k1fx.OutputOwners{
+						Addrs: []ids.ShortID{},
+					},
+				),
+			})
+
+			return specificGen.FlatMap(
+				func(v interface{}) gopter.Gen {
+					stakerTx := v.(*txs.AddDelegatorTx)
+
+					if err := stakerTx.SyntacticVerify(ctx); err != nil {
+						panic(fmt.Errorf("failed syntax verification in tx generator, %w", err))
+					}
+
+					// Note: we don't sign the tx here, since we want the freedom to modify
+					// the stakerTx just before testing while avoiding the wrong txID.
+					// We use txs.Tx as a box to return a txs.StakerTx interface.
+					sTx := &txs.Tx{Unsigned: stakerTx}
+
+					return gen.Const(sTx)
+				},
+				reflect.TypeOf(&txs.AddDelegatorTx{}),
+			)
+		},
+		reflect.TypeOf(txs.Validator{}),
+	)
+}
+
+func validatorTxGenerator(
+	nodeID *ids.NodeID,
+	maxWeight uint64, // helps avoid overflows in delegator tests
+) gopter.Gen {
+	return genStakerMicroData().FlatMap(
+		func(v interface{}) gopter.Gen {
+			macro := v.(stakerMicroData)
+
+			genStakerNodeID := genNodeID
+			if nodeID != nil {
+				genStakerNodeID = gen.Const(*nodeID)
+			}
+
+			return gen.Struct(reflect.TypeOf(txs.Validator{}), map[string]gopter.Gen{
+				"NodeID": genStakerNodeID,
+				"Start": gen.Const(uint64(macro.StartTime.Unix())),
+				"End": gen.Const(uint64(macro.StartTime.Add(time.Duration(macro.Duration)).Unix())),
+				"Wght": gen.UInt64Range(1, maxWeight),
+			})
+		},
+		reflect.TypeOf(stakerMicroData{}),
+	)
+}
+
+// stakerMicroData holds seed attributes used to generate staker time data
+type stakerMicroData struct {
+	StartTime time.Time
+	Duration int64
+}
+
+// genStakerMicroData is the helper to generate stakerMicroData
+func genStakerMicroData() gopter.Gen {
+	return gen.Struct(reflect.TypeOf(&stakerMicroData{}), map[string]gopter.Gen{
+		"StartTime": gen.Time(),
+		"Duration": gen.Int64Range(1, 365*24),
+	})
+}
+
+const (
+	lengthID = 32
+	lengthNodeID = 20
+)
+
+// genID is the helper generator for ids.ID objects
+var genID = gen.SliceOfN(lengthID, gen.UInt8()).FlatMap(
+	func(v interface{}) gopter.Gen {
+		byteSlice := v.([]byte)
+		var byteArray [lengthID]byte
+		copy(byteArray[:], byteSlice)
+		return gen.Const(ids.ID(byteArray))
+	},
+	reflect.TypeOf([]byte{}),
+)
+
+// genNodeID is the helper generator for ids.NodeID objects
+var genNodeID = gen.SliceOfN(lengthNodeID, gen.UInt8()).FlatMap(
+	func(v interface{}) gopter.Gen {
+		byteSlice := v.([]byte)
+		var byteArray [lengthNodeID]byte
+		copy(byteArray[:], byteSlice)
+		return gen.Const(ids.NodeID(byteArray))
+ }, + reflect.TypeOf([]byte{}), +) diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index 00eb74d595a2..51d251be7a8e 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -5,7 +5,6 @@ package state import ( "fmt" - "math" "reflect" "sync/atomic" "testing" @@ -15,6 +14,7 @@ import ( "github.com/leanovate/gopter/gen" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -27,6 +27,8 @@ var ( _ commands.Command = (*addTopDiffCommand)(nil) _ commands.Command = (*applyBottomDiffCommand)(nil) _ commands.Command = (*commitBottomStateCommand)(nil) + + commandsCtx = buildStateCtx() ) // TestStateAndDiffComparisonToStorageModel verifies that a production-like @@ -188,19 +190,30 @@ var stakersCommands = &commands.ProtoCommands{ } // PutCurrentValidator section -type putCurrentValidatorCommand Staker +type putCurrentValidatorCommand txs.Tx func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - staker := (*Staker)(v) + sTx := (*txs.Tx)(v) sys := sut.(*sysUnderTest) + + currentVal, err := NewCurrentStaker(sTx.ID(), sTx.Unsigned.(txs.Staker), uint64(1000)) + if err != nil { + return sys // state checks later on should spot missing validator + } + topChainState := sys.getTopChainState() - topChainState.PutCurrentValidator(staker) + topChainState.PutCurrentValidator(currentVal) return sys } func (v *putCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { - staker := (*Staker)(v) - cmdState.(*stakersStorageModel).PutCurrentValidator(staker) + sTx := (*txs.Tx)(v) + currentVal, err := NewCurrentStaker(sTx.ID(), sTx.Unsigned.(txs.Staker), uint64(1000)) + if err != nil { + return cmdState // state checks later on should spot missing validator + } + + cmdState.(*stakersStorageModel).PutCurrentValidator(currentVal) return cmdState } @@ -214,13 +227,19 @@ func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res co } func (v *putCurrentValidatorCommand) String() string { + stakerTx := v.Unsigned.(txs.StakerTx) return fmt.Sprintf("PutCurrentValidator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", - v.SubnetID, v.NodeID, v.TxID, v.Priority, v.StartTime.Unix(), v.EndTime.Sub(v.StartTime)) + stakerTx.SubnetID(), stakerTx.NodeID(), v.TxID, stakerTx.CurrentPriority(), stakerTx.StartTime().Unix(), stakerTx.EndTime().Sub(stakerTx.StartTime())) } -var genPutCurrentValidatorCommand = stakerGenerator(currentValidator, nil, nil, math.MaxUint64).Map( - func(staker Staker) commands.Command { - cmd := (*putCurrentValidatorCommand)(&staker) +var genPutCurrentValidatorCommand = addPermissionlessValidatorTxGenerator(commandsCtx, nil, nil, &signer.Empty{}).Map( + func(nonInitTx *txs.Tx) commands.Command { + sTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx, %w", err)) + } + + cmd := (*putCurrentValidatorCommand)(sTx) return cmd }, ) @@ -313,21 +332,21 @@ var genDeleteCurrentValidatorCommand = gen.IntRange(1, 2).Map( ) // PutCurrentDelegator section -type putCurrentDelegatorCommand Staker +type putCurrentDelegatorCommand txs.Tx func (v *putCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - candidateDelegator := (*Staker)(v) + candidateDelegator := (*txs.Tx)(v) sys := sut.(*sysUnderTest) - 
err := addCurrentDelegatorInSystem(sys, candidateDelegator) + err := addCurrentDelegatorInSystem(sys, candidateDelegator.Unsigned) if err != nil { panic(err) } return sys } -func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegator *Staker) error { +func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegatorTx txs.UnsignedTx) error { // 1. check if there is a current validator, already inserted. If not return - // 2. Update candidateDelegator attributes to make it delegator of selected validator + // 2. Update candidateDelegatorTx attributes to make it delegator of selected validator // 3. Add delegator to picked validator chain := sys.getTopChainState() @@ -357,25 +376,35 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegator *Staker) stakerIt.Release() // release before modifying stakers collection // 2. Add a delegator to it - delegator := candidateDelegator - delegator.SubnetID = validator.SubnetID - delegator.NodeID = validator.NodeID + addPermissionlessDelTx := candidateDelegatorTx.(*txs.AddPermissionlessDelegatorTx) + addPermissionlessDelTx.Subnet = validator.SubnetID + addPermissionlessDelTx.Validator.NodeID = validator.NodeID + + signedTx, err := txs.NewSigned(addPermissionlessDelTx, txs.Codec, nil) + if err != nil { + return fmt.Errorf("failed signing tx, %w", err) + } + + delegator, err := NewCurrentStaker(signedTx.ID(), signedTx.Unsigned.(txs.Staker), uint64(1000)) + if err != nil { + return fmt.Errorf("failed generating staker, %w", err) + } chain.PutCurrentDelegator(delegator) return nil } func (v *putCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { - candidateDelegator := (*Staker)(v) + candidateDelegator := (*txs.Tx)(v) model := cmdState.(*stakersStorageModel) - err := addCurrentDelegatorInModel(model, candidateDelegator) + err := addCurrentDelegatorInModel(model, candidateDelegator.Unsigned) if err != nil { panic(err) } return cmdState } -func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegator *Staker) error { +func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegatorTx txs.UnsignedTx) error { // 1. check if there is a current validator, already inserted. If not return // 2. Update candidateDelegator attributes to make it delegator of selected validator // 3. Add delegator to picked validator @@ -406,9 +435,19 @@ func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegator * stakerIt.Release() // release before modifying stakers collection // 2. 
Add a delegator to it - delegator := candidateDelegator - delegator.SubnetID = validator.SubnetID - delegator.NodeID = validator.NodeID + addPermissionlessDelTx := candidateDelegatorTx.(*txs.AddPermissionlessDelegatorTx) + addPermissionlessDelTx.Subnet = validator.SubnetID + addPermissionlessDelTx.Validator.NodeID = validator.NodeID + + signedTx, err := txs.NewSigned(addPermissionlessDelTx, txs.Codec, nil) + if err != nil { + return fmt.Errorf("failed signing tx, %w", err) + } + + delegator, err := NewCurrentStaker(signedTx.ID(), signedTx.Unsigned.(txs.Staker), uint64(1000)) + if err != nil { + return fmt.Errorf("failed generating staker, %w", err) + } model.PutCurrentDelegator(delegator) return nil @@ -423,13 +462,24 @@ func (*putCurrentDelegatorCommand) PostCondition(cmdState commands.State, res co } func (v *putCurrentDelegatorCommand) String() string { + stakerTx := v.Unsigned.(txs.StakerTx) return fmt.Sprintf("putCurrentDelegator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", - v.SubnetID, v.NodeID, v.TxID, v.Priority, v.StartTime.Unix(), v.EndTime.Sub(v.StartTime)) + stakerTx.SubnetID(), + stakerTx.NodeID(), + v.TxID, + stakerTx.CurrentPriority(), + stakerTx.StartTime().Unix(), + stakerTx.EndTime().Sub(stakerTx.StartTime())) } -var genPutCurrentDelegatorCommand = stakerGenerator(currentDelegator, nil, nil, 1000).Map( - func(staker Staker) commands.Command { - cmd := (*putCurrentDelegatorCommand)(&staker) +var genPutCurrentDelegatorCommand = addPermissionlessDelegatorTxGenerator(commandsCtx, nil, nil, 1000).Map( + func(nonInitTx *txs.Tx) commands.Command { + sTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx, %w", err)) + } + + cmd := (*putCurrentDelegatorCommand)(sTx) return cmd }, ) diff --git a/vms/platformvm/state/stakers_properties_test.go b/vms/platformvm/state/stakers_properties_test.go index 8a8b4fafe18c..a783d2ce7e43 100644 --- a/vms/platformvm/state/stakers_properties_test.go +++ b/vms/platformvm/state/stakers_properties_test.go @@ -16,7 +16,9 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) // TestGeneralStakerContainersProperties checks that State and Diff conform our stakersStorageModel. 
@@ -48,120 +50,142 @@ func TestGeneralStakerContainersProperties(t *testing.T) { func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *gopter.Properties { properties := gopter.NewProperties(nil) + ctx := buildStateCtx() + properties.Property("add, delete and query current validators", prop.ForAll( - func(s Staker) string { + func(nonInitTx *txs.Tx) string { store, err := storeCreatorF() if err != nil { return fmt.Sprintf("unexpected error while creating staker store, err %v", err) } + signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) + } + + staker, err := NewCurrentStaker(signedTx.ID(), signedTx.Unsigned.(txs.StakerTx), uint64(100)) + if err != nil { + return err.Error() + } + // no staker before insertion - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + _, err = store.GetCurrentValidator(staker.SubnetID, staker.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkStakersContent(store, []Staker{}, current) + err = checkStakersContent(store, []*Staker{}, current) if err != nil { return err.Error() } // it's fine deleting unknown validator - store.DeleteCurrentValidator(&s) - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + store.DeleteCurrentValidator(staker) + _, err = store.GetCurrentValidator(staker.SubnetID, staker.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkStakersContent(store, []Staker{}, current) + err = checkStakersContent(store, []*Staker{}, current) if err != nil { return err.Error() } // insert the staker and show it can be found - store.PutCurrentValidator(&s) - retrievedStaker, err := store.GetCurrentValidator(s.SubnetID, s.NodeID) + store.PutCurrentValidator(staker) + retrievedStaker, err := store.GetCurrentValidator(staker.SubnetID, staker.NodeID) if err != nil { return fmt.Sprintf("expected no error, got %v", err) } - if !reflect.DeepEqual(&s, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + if !reflect.DeepEqual(staker, retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", staker, retrievedStaker) } - err = checkStakersContent(store, []Staker{s}, current) + err = checkStakersContent(store, []*Staker{staker}, current) if err != nil { return err.Error() } // delete the staker and show it's not found anymore - store.DeleteCurrentValidator(&s) - _, err = store.GetCurrentValidator(s.SubnetID, s.NodeID) + store.DeleteCurrentValidator(staker) + _, err = store.GetCurrentValidator(staker.SubnetID, staker.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkStakersContent(store, []Staker{}, current) + err = checkStakersContent(store, []*Staker{}, current) if err != nil { return err.Error() } return "" }, - stakerGenerator(anyPriority, nil, nil, math.MaxUint64), + stakerTxGenerator(ctx, permissionedValidator, &constants.PrimaryNetworkID, nil, &signer.Empty{}, math.MaxUint64), )) properties.Property("add, delete and query pending validators", prop.ForAll( - func(s Staker) string { + func(nonInitTx *txs.Tx) string { store, err := storeCreatorF() if err != nil { return fmt.Sprintf("unexpected error while creating staker store, err %v", err) } + signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, 
nil) + if err != nil { + panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) + } + + staker, err := NewPendingStaker(signedTx.ID(), signedTx.Unsigned.(txs.StakerTx)) + if err != nil { + return err.Error() + } + // no staker before insertion - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + _, err = store.GetPendingValidator(staker.SubnetID, staker.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkStakersContent(store, []Staker{}, pending) + err = checkStakersContent(store, []*Staker{}, pending) if err != nil { return err.Error() } // it's fine deleting unknown validator - store.DeletePendingValidator(&s) - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + store.DeletePendingValidator(staker) + _, err = store.GetPendingValidator(staker.SubnetID, staker.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkStakersContent(store, []Staker{}, pending) + err = checkStakersContent(store, []*Staker{}, pending) if err != nil { return err.Error() } // insert the staker and show it can be found - store.PutPendingValidator(&s) - retrievedStaker, err := store.GetPendingValidator(s.SubnetID, s.NodeID) + store.PutPendingValidator(staker) + retrievedStaker, err := store.GetPendingValidator(staker.SubnetID, staker.NodeID) if err != nil { return fmt.Sprintf("expected no error, got %v", err) } - if !reflect.DeepEqual(&s, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &s, retrievedStaker) + if !reflect.DeepEqual(staker, retrievedStaker) { + return fmt.Sprintf("wrong staker retrieved expected %v, got %v", staker, retrievedStaker) } - err = checkStakersContent(store, []Staker{s}, pending) + err = checkStakersContent(store, []*Staker{staker}, pending) if err != nil { return err.Error() } // delete the staker and show it's found anymore - store.DeletePendingValidator(&s) - _, err = store.GetPendingValidator(s.SubnetID, s.NodeID) + store.DeletePendingValidator(staker) + _, err = store.GetPendingValidator(staker.SubnetID, staker.NodeID) if err != database.ErrNotFound { return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) } - err = checkStakersContent(store, []Staker{}, pending) + err = checkStakersContent(store, []*Staker{}, pending) if err != nil { return err.Error() } return "" }, - stakerGenerator(anyPriority, nil, nil, math.MaxUint64), + stakerTxGenerator(ctx, permissionedValidator, &constants.PrimaryNetworkID, nil, &signer.Empty{}, math.MaxUint64), )) var ( @@ -169,29 +193,54 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g nodeID = ids.GenerateTestNodeID() ) properties.Property("add, delete and query current delegators", prop.ForAll( - func(val Staker, dels []Staker) string { + func(nonInitValTx *txs.Tx, nonInitDelTxs []*txs.Tx) string { store, err := storeCreatorF() if err != nil { return fmt.Sprintf("unexpected error while creating staker store, err %v", err) } + signedValTx, err := txs.NewSigned(nonInitValTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) + } + + val, err := NewCurrentStaker(signedValTx.ID(), signedValTx.Unsigned.(txs.StakerTx), uint64(1000)) + if err != nil { + return err.Error() + } + + dels := make([]*Staker, 0, len(nonInitDelTxs)) + for _, nonInitDelTx := range nonInitDelTxs { + signedDelTx, err := txs.NewSigned(nonInitDelTx.Unsigned, 
txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) + } + + del, err := NewCurrentStaker(signedDelTx.ID(), signedDelTx.Unsigned.(txs.StakerTx), uint64(1000)) + if err != nil { + return err.Error() + } + + dels = append(dels, del) + } + // store validator - store.PutCurrentValidator(&val) + store.PutCurrentValidator(val) retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) if err != nil { return fmt.Sprintf("expected no error, got %v", err) } - if !reflect.DeepEqual(&val, retrievedValidator) { + if !reflect.DeepEqual(val, retrievedValidator) { return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) } - err = checkStakersContent(store, []Staker{val}, current) + err = checkStakersContent(store, []*Staker{val}, current) if err != nil { return err.Error() } // store delegators for _, del := range dels { - cpy := del + cpy := *del // it's fine deleting unknown delegator store.DeleteCurrentDelegator(&cpy) @@ -208,7 +257,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) } for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { + if reflect.DeepEqual(delIt.Value(), del) { found = true break } @@ -228,7 +277,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g for delIt.Next() { found := false for _, del := range dels { - if reflect.DeepEqual(*delIt.Value(), del) { + if reflect.DeepEqual(delIt.Value(), del) { found = true break } @@ -249,7 +298,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g // delete delegators for _, del := range dels { - cpy := del + cpy := *del store.DeleteCurrentDelegator(&cpy) // check deleted delegator is not there anymore @@ -260,7 +309,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g found := false for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { + if reflect.DeepEqual(delIt.Value(), del) { found = true break } @@ -273,45 +322,74 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g return "" }, - stakerGenerator(currentValidator, &subnetID, &nodeID, math.MaxUint64), - gen.SliceOfN(10, stakerGenerator(currentDelegator, &subnetID, &nodeID, math.MaxUint64)). - SuchThat(func(v interface{}) bool { - stakersList := v.([]Staker) - uniqueTxIDs := set.NewSet[ids.ID](len(stakersList)) - for _, staker := range stakersList { - uniqueTxIDs.Add(staker.TxID) - } - - // make sure TxIDs are unique, at least among delegators. 
- return len(stakersList) == uniqueTxIDs.Len() - }), + stakerTxGenerator(ctx, + permissionlessValidator, + &subnetID, + &nodeID, + &signer.Empty{}, + math.MaxUint64, + ), + gen.SliceOfN(10, + stakerTxGenerator(ctx, + permissionlessDelegator, + &subnetID, + &nodeID, + &signer.Empty{}, + 1000, + ), + ), )) properties.Property("add, delete and query pending delegators", prop.ForAll( - func(val Staker, dels []Staker) string { + func(nonInitValTx *txs.Tx, nonInitDelTxs []*txs.Tx) string { store, err := storeCreatorF() if err != nil { return fmt.Sprintf("unexpected error while creating staker store, err %v", err) } + signedValTx, err := txs.NewSigned(nonInitValTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) + } + + val, err := NewCurrentStaker(signedValTx.ID(), signedValTx.Unsigned.(txs.StakerTx), uint64(1000)) + if err != nil { + return err.Error() + } + + dels := make([]*Staker, 0, len(nonInitDelTxs)) + for _, nonInitDelTx := range nonInitDelTxs { + signedDelTx, err := txs.NewSigned(nonInitDelTx.Unsigned, txs.Codec, nil) + if err != nil { + panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) + } + + del, err := NewCurrentStaker(signedDelTx.ID(), signedDelTx.Unsigned.(txs.StakerTx), uint64(1000)) + if err != nil { + return err.Error() + } + + dels = append(dels, del) + } + // store validator - store.PutCurrentValidator(&val) + store.PutCurrentValidator(val) retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) if err != nil { return fmt.Sprintf("expected no error, got %v", err) } - if !reflect.DeepEqual(&val, retrievedValidator) { + if !reflect.DeepEqual(val, retrievedValidator) { return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) } - err = checkStakersContent(store, []Staker{val}, current) + err = checkStakersContent(store, []*Staker{val}, current) if err != nil { return err.Error() } // store delegators for _, del := range dels { - cpy := del + cpy := *del // it's fine deleting unknown delegator store.DeletePendingDelegator(&cpy) @@ -328,7 +406,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) } for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { + if reflect.DeepEqual(delIt.Value(), del) { found = true break } @@ -348,7 +426,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g for delIt.Next() { found := false for _, del := range dels { - if reflect.DeepEqual(*delIt.Value(), del) { + if reflect.DeepEqual(delIt.Value(), del) { found = true break } @@ -367,7 +445,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g // delete delegators for _, del := range dels { - cpy := del + cpy := *del store.DeletePendingDelegator(&cpy) // check deleted delegator is not there anymore @@ -378,7 +456,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g found := false for delIt.Next() { - if reflect.DeepEqual(*delIt.Value(), del) { + if reflect.DeepEqual(delIt.Value(), del) { found = true break } @@ -391,18 +469,22 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g return "" }, - stakerGenerator(currentValidator, &subnetID, &nodeID, math.MaxUint64), - gen.SliceOfN(10, stakerGenerator(pendingDelegator, &subnetID, &nodeID, math.MaxUint64)). 
-		SuchThat(func(v interface{}) bool {
-			stakersList := v.([]Staker)
-			uniqueTxIDs := set.NewSet[ids.ID](len(stakersList))
-			for _, staker := range stakersList {
-				uniqueTxIDs.Add(staker.TxID)
-			}
-
-			// make sure TxIDs are unique, at least among delegators
-			return len(stakersList) == uniqueTxIDs.Len()
-		}),
+		stakerTxGenerator(ctx,
+			permissionlessValidator,
+			&subnetID,
+			&nodeID,
+			&signer.Empty{},
+			math.MaxUint64,
+		),
+		gen.SliceOfN(10,
+			stakerTxGenerator(ctx,
+				permissionlessDelegator,
+				&subnetID,
+				&nodeID,
+				&signer.Empty{},
+				1000,
+			),
+		),
 	))
 
 	return properties
@@ -427,7 +509,7 @@ func buildDiffOnTopOfBaseState(trackedSubnets []ids.ID) (Diff, State, error) {
 
 // [checkStakersContent] verifies whether store contains exactly the stakers specified in the list.
 // stakers order does not matter. stakers slice gets consumed while checking.
-func checkStakersContent(store Stakers, stakers []Staker, stakersType stakerStatus) error {
+func checkStakersContent(store Stakers, stakers []*Staker, stakersType stakerStatus) error {
 	var (
 		it  StakerIterator
 		err error
@@ -462,7 +544,7 @@ func checkStakersContent(store Stakers, stakers []Staker, stakersType stakerStat
 	)
 
 	for idx, s := range stakers {
-		if reflect.DeepEqual(*staker, s) {
+		if reflect.DeepEqual(staker, s) {
 			retrievedStakerIdx = idx
 			found = true
 		}

From 564098a97b3fe6965f14a370cd4c306e2894e827 Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Wed, 28 Jun 2023 16:38:06 +0200
Subject: [PATCH 042/132] nits

---
 .../state/stakers_model_generator_test.go     | 70 ++++++++++---------
 1 file changed, 37 insertions(+), 33 deletions(-)

diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go
index 38faac813dec..603250bd9bd6 100644
--- a/vms/platformvm/state/stakers_model_generator_test.go
+++ b/vms/platformvm/state/stakers_model_generator_test.go
@@ -30,10 +30,14 @@ const (
 	permissionedDelegator
 )
 
-// TODO ABENEGIA: complete
-// stakerTxGenerator helps creating random yet reproducible Staker objects,
-// which can be used in our property tests. stakerTxGenerator takes care of
-// enforcing some Staker invariants on each and every random sample.
+// stakerTxGenerator helps create random yet reproducible txs.StakerTx values,
+// which can be used in our property tests. stakerTxGenerator returns the txs.StakerTx
+// as the Unsigned attribute of a txs.Tx to work around the inability of gopter
+// generators to return interfaces. Signing of the wrapping txs.Tx is deferred
+// to the tests, so they can modify staker parameters without invalidating the txID.
+// A full txs.StakerTx is returned, instead of a Staker object, in order to extend
+// property testing to staker reload (which starts from the transaction). The tx is
+// filled in just enough to rebuild staker state (input/output UTXOs are neglected).
 // TestGeneratedStakersValidity documents and verifies the enforced invariants.
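The boxing trick described above relies on gopter's FlatMap combinator: a concrete value is generated first, then re-wrapped in a gen.Const of the carrier type. A minimal sketch under the same gopter v0.2.9 API (the box type and boxedUint64Gen helper are illustrative, not part of this package; FlatMap's reflect.Type argument follows the convention used in this file):

package state

import (
	"reflect"

	"github.com/leanovate/gopter"
	"github.com/leanovate/gopter/gen"
)

// box stands in for txs.Tx: it carries an interface field,
// which gopter generators cannot produce directly.
type box struct {
	value interface{}
}

// boxedUint64Gen generates a concrete uint64 first, then wraps it
// into a box via gen.Const, mirroring how the generators below wrap
// a generated txs.StakerTx into a txs.Tx.
func boxedUint64Gen(maxWeight uint64) gopter.Gen {
	return gen.UInt64Range(1, maxWeight).FlatMap(
		func(v interface{}) gopter.Gen {
			weight := v.(uint64) // concrete value produced upstream
			return gen.Const(&box{value: weight})
		},
		reflect.TypeOf(uint64(0)), // type hint, as passed elsewhere in this file
	)
}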
func stakerTxGenerator( ctx *snow.Context, @@ -63,13 +67,13 @@ func addPermissionlessValidatorTxGenerator( nodeID *ids.NodeID, blsSigner signer.Signer, ) gopter.Gen { - return validatorTxGenerator(nodeID, math.MaxUint64).FlatMap( + return stakerDataGenerator(nodeID, math.MaxUint64).FlatMap( func(v interface{}) gopter.Gen { genStakerSubnetID := genID if subnetID != nil { genStakerSubnetID = gen.Const(*subnetID) } - validatorTx := v.(txs.Validator) + stakerData := v.(txs.Validator) specificGen := gen.StructPtr(reflect.TypeOf(&txs.AddPermissionlessValidatorTx{}), map[string]gopter.Gen{ "BaseTx": gen.Const(txs.BaseTx{ @@ -80,7 +84,7 @@ func addPermissionlessValidatorTxGenerator( Outs: []*avax.TransferableOutput{}, }, }), - "Validator": gen.Const(validatorTx), + "Validator": gen.Const(stakerData), "Subnet": genStakerSubnetID, "Signer": gen.Const(blsSigner), "StakeOuts": gen.Const([]*avax.TransferableOutput{ @@ -89,7 +93,7 @@ func addPermissionlessValidatorTxGenerator( ID: ctx.AVAXAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: validatorTx.Weight(), + Amt: stakerData.Weight(), }, }, }), @@ -132,9 +136,9 @@ func addValidatorTxGenerator( ctx *snow.Context, nodeID *ids.NodeID, ) gopter.Gen { - return validatorTxGenerator(nodeID, math.MaxUint64).FlatMap( + return stakerDataGenerator(nodeID, math.MaxUint64).FlatMap( func(v interface{}) gopter.Gen { - validatorTx := v.(txs.Validator) + stakerData := v.(txs.Validator) specificGen := gen.StructPtr(reflect.TypeOf(&txs.AddValidatorTx{}), map[string]gopter.Gen{ "BaseTx": gen.Const(txs.BaseTx{ @@ -145,14 +149,14 @@ func addValidatorTxGenerator( Outs: []*avax.TransferableOutput{}, }, }), - "Validator": gen.Const(validatorTx), + "Validator": gen.Const(stakerData), "StakeOuts": gen.Const([]*avax.TransferableOutput{ { Asset: avax.Asset{ ID: ctx.AVAXAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: validatorTx.Weight(), + Amt: stakerData.Weight(), }, }, }), @@ -192,15 +196,15 @@ func addPermissionlessDelegatorTxGenerator( nodeID *ids.NodeID, maxWeight uint64, // helps avoiding overflows in delegator tests ) gopter.Gen { - return validatorTxGenerator(nodeID, maxWeight).FlatMap( + return stakerDataGenerator(nodeID, maxWeight).FlatMap( func(v interface{}) gopter.Gen { genStakerSubnetID := genID if subnetID != nil { genStakerSubnetID = gen.Const(*subnetID) } - validatorTx := v.(txs.Validator) - specificGen := gen.StructPtr(reflect.TypeOf(txs.AddPermissionlessDelegatorTx{}), map[string]gopter.Gen{ + stakerData := v.(txs.Validator) + delGen := gen.StructPtr(reflect.TypeOf(txs.AddPermissionlessDelegatorTx{}), map[string]gopter.Gen{ "BaseTx": gen.Const(txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: ctx.NetworkID, @@ -209,7 +213,7 @@ func addPermissionlessDelegatorTxGenerator( Outs: []*avax.TransferableOutput{}, }, }), - "Validator": gen.Const(validatorTx), + "Validator": gen.Const(stakerData), "Subnet": genStakerSubnetID, "StakeOuts": gen.Const([]*avax.TransferableOutput{ { @@ -217,7 +221,7 @@ func addPermissionlessDelegatorTxGenerator( ID: ctx.AVAXAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: validatorTx.Weight(), + Amt: stakerData.Weight(), }, }, }), @@ -228,7 +232,7 @@ func addPermissionlessDelegatorTxGenerator( ), }) - return specificGen.FlatMap( + return delGen.FlatMap( func(v interface{}) gopter.Gen { stakerTx := v.(*txs.AddPermissionlessDelegatorTx) @@ -255,10 +259,10 @@ func addDelegatorTxGenerator( nodeID *ids.NodeID, maxWeight uint64, // helps avoiding overflows in delegator tests ) gopter.Gen { - return validatorTxGenerator(nodeID, 
maxWeight).FlatMap( + return stakerDataGenerator(nodeID, maxWeight).FlatMap( func(v interface{}) gopter.Gen { - validatorTx := v.(txs.Validator) - specificGen := gen.StructPtr(reflect.TypeOf(txs.AddDelegatorTx{}), map[string]gopter.Gen{ + stakerData := v.(txs.Validator) + delGen := gen.StructPtr(reflect.TypeOf(txs.AddDelegatorTx{}), map[string]gopter.Gen{ "BaseTx": gen.Const(txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: ctx.NetworkID, @@ -267,14 +271,14 @@ func addDelegatorTxGenerator( Outs: []*avax.TransferableOutput{}, }, }), - "Validator": gen.Const(validatorTx), + "Validator": gen.Const(stakerData), "StakeOuts": gen.Const([]*avax.TransferableOutput{ { Asset: avax.Asset{ ID: ctx.AVAXAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: validatorTx.Weight(), + Amt: stakerData.Weight(), }, }, }), @@ -285,7 +289,7 @@ func addDelegatorTxGenerator( ), }) - return specificGen.FlatMap( + return delGen.FlatMap( func(v interface{}) gopter.Gen { stakerTx := v.(*txs.AddDelegatorTx) @@ -307,13 +311,13 @@ func addDelegatorTxGenerator( ) } -func validatorTxGenerator( +func stakerDataGenerator( nodeID *ids.NodeID, maxWeight uint64, // helps avoiding overflows in delegator tests ) gopter.Gen { - return genStakerMicroData().FlatMap( + return genStakerTimeData().FlatMap( func(v interface{}) gopter.Gen { - macro := v.(stakerMicroData) + macro := v.(stakerTimeData) genStakerNodeID := genNodeID if nodeID != nil { @@ -327,19 +331,19 @@ func validatorTxGenerator( "Wght": gen.UInt64Range(1, maxWeight), }) }, - reflect.TypeOf(stakerMicroData{}), + reflect.TypeOf(stakerTimeData{}), ) } -// stakerMicroData holds seed attributes to generate stakerMacroData -type stakerMicroData struct { +// stakerTimeData holds seed attributes to generate a random-yet-reproducible txs.Validator +type stakerTimeData struct { StartTime time.Time Duration int64 } -// genStakerMicroData is the helper to generate stakerMicroData -func genStakerMicroData() gopter.Gen { - return gen.Struct(reflect.TypeOf(&stakerMicroData{}), map[string]gopter.Gen{ +// genStakerTimeData is the helper to generate stakerMicroData +func genStakerTimeData() gopter.Gen { + return gen.Struct(reflect.TypeOf(&stakerTimeData{}), map[string]gopter.Gen{ "StartTime": gen.Time(), "Duration": gen.Int64Range(1, 365*24), }) From 4ed0ec60c2b6beede093e9850aa48ab534956136 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 28 Jun 2023 18:43:40 +0200 Subject: [PATCH 043/132] extended property tests with state rebuild checks --- vms/platformvm/state/stakers_helpers_test.go | 9 +- .../state/stakers_model_generator_test.go | 6 +- .../state/stakers_model_storage_test.go | 82 ++++++++++++++++++- .../state/stakers_properties_test.go | 11 ++- 4 files changed, 92 insertions(+), 16 deletions(-) diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index 0c268c88b7fd..532356fbf51c 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -10,8 +10,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/chains" - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/uptime" @@ -21,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" 
"github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" @@ -66,10 +64,7 @@ func buildStateCtx() *snow.Context { return ctx } -func buildChainState(trackedSubnets []ids.ID) (State, error) { - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - baseDB := versiondb.New(baseDBManager.Current().Database) - +func buildChainState(baseDB database.Database, trackedSubnets []ids.ID) (State, error) { cfg := defaultConfig() cfg.TrackedSubnets.Add(trackedSubnets...) diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go index 603250bd9bd6..3a9dc1ab0140 100644 --- a/vms/platformvm/state/stakers_model_generator_test.go +++ b/vms/platformvm/state/stakers_model_generator_test.go @@ -317,7 +317,7 @@ func stakerDataGenerator( ) gopter.Gen { return genStakerTimeData().FlatMap( func(v interface{}) gopter.Gen { - macro := v.(stakerTimeData) + stakerData := v.(stakerTimeData) genStakerNodeID := genNodeID if nodeID != nil { @@ -326,8 +326,8 @@ func stakerDataGenerator( return gen.Struct(reflect.TypeOf(txs.Validator{}), map[string]gopter.Gen{ "NodeID": genStakerNodeID, - "Start": gen.Const(uint64(macro.StartTime.Unix())), - "End": gen.Const(uint64(macro.StartTime.Add(time.Duration(macro.Duration)).Unix())), + "Start": gen.Const(uint64(stakerData.StartTime.Unix())), + "End": gen.Const(uint64(stakerData.StartTime.Add(time.Duration(stakerData.Duration)).Unix())), "Wght": gen.UInt64Range(1, maxWeight), }) }, diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index 51d251be7a8e..68460be8b86c 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -13,8 +13,13 @@ import ( "github.com/leanovate/gopter/commands" "github.com/leanovate/gopter/gen" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -27,6 +32,7 @@ var ( _ commands.Command = (*addTopDiffCommand)(nil) _ commands.Command = (*applyBottomDiffCommand)(nil) _ commands.Command = (*commitBottomStateCommand)(nil) + _ commands.Command = (*rebuildStateCommand)(nil) commandsCtx = buildStateCtx() ) @@ -53,13 +59,15 @@ func TestStateAndDiffComparisonToStorageModel(t *testing.T) { type sysUnderTest struct { diffBlkIDSeed uint64 + baseDB database.Database baseState State sortedDiffIDs []ids.ID diffsMap map[ids.ID]Diff } -func newSysUnderTest(baseState State) *sysUnderTest { +func newSysUnderTest(baseDB database.Database, baseState State) *sysUnderTest { sys := &sysUnderTest{ + baseDB: baseDB, baseState: baseState, diffsMap: map[ids.ID]Diff{}, sortedDiffIDs: []ids.ID{}, @@ -127,7 +135,10 @@ func (s *sysUnderTest) flushBottomDiff() bool { var stakersCommands = &commands.ProtoCommands{ NewSystemUnderTestFunc: func(initialState commands.State) commands.SystemUnderTest { model := initialState.(*stakersStorageModel) - baseState, err := buildChainState(nil) + + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + baseDB := 
versiondb.New(baseDBManager.Current().Database) + baseState, err := buildChainState(baseDB, nil) if err != nil { panic(err) } @@ -153,7 +164,7 @@ var stakersCommands = &commands.ProtoCommands{ panic(err) } - return newSysUnderTest(baseState) + return newSysUnderTest(baseDB, baseState) }, DestroySystemUnderTestFunc: func(sut commands.SystemUnderTest) { // retrieve base state and close it @@ -185,6 +196,7 @@ var stakersCommands = &commands.ProtoCommands{ genAddTopDiffCommand, genApplyBottomDiffCommand, genCommitBottomStateCommand, + genRebuildStateCommand, ) }, } @@ -203,6 +215,7 @@ func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands. topChainState := sys.getTopChainState() topChainState.PutCurrentValidator(currentVal) + topChainState.AddTx(sTx, status.Committed) return sys } @@ -229,7 +242,13 @@ func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res co func (v *putCurrentValidatorCommand) String() string { stakerTx := v.Unsigned.(txs.StakerTx) return fmt.Sprintf("PutCurrentValidator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", - stakerTx.SubnetID(), stakerTx.NodeID(), v.TxID, stakerTx.CurrentPriority(), stakerTx.StartTime().Unix(), stakerTx.EndTime().Sub(stakerTx.StartTime())) + stakerTx.SubnetID(), + stakerTx.NodeID(), + v.TxID, + stakerTx.CurrentPriority(), + stakerTx.StartTime().Unix(), + stakerTx.EndTime().Sub(stakerTx.StartTime()), + ) } var genPutCurrentValidatorCommand = addPermissionlessValidatorTxGenerator(commandsCtx, nil, nil, &signer.Empty{}).Map( @@ -391,6 +410,7 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegatorTx txs.Uns } chain.PutCurrentDelegator(delegator) + chain.AddTx(signedTx, status.Committed) return nil } @@ -670,6 +690,60 @@ var genCommitBottomStateCommand = gen.IntRange(1, 2).Map( }, ) +// rebuildStateCommand section +type rebuildStateCommand struct{} + +func (*rebuildStateCommand) Run(sut commands.SystemUnderTest) commands.Result { + sys := sut.(*sysUnderTest) + + // 1. Persist all outstanding changes + for sys.flushBottomDiff() { + err := sys.baseState.Commit() + if err != nil { + panic(err) + } + } + + if err := sys.baseState.Commit(); err != nil { + panic(err) + } + + // 2. Rebuild the state from the db + baseState, err := buildChainState(sys.baseDB, nil) + if err != nil { + panic(err) + } + sys.baseState = baseState + sys.diffsMap = map[ids.ID]Diff{} + sys.sortedDiffIDs = []ids.ID{} + + return sys +} + +func (*rebuildStateCommand) NextState(cmdState commands.State) commands.State { + return cmdState // model has no diffs +} + +func (*rebuildStateCommand) PreCondition(commands.State) bool { + return true +} + +func (*rebuildStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + return checkSystemAndModelContent(cmdState, res) +} + +func (*rebuildStateCommand) String() string { + return "RebuildStateCommand" +} + +// a trick to force command regeneration at each sampling. 
+// gen.Const would not allow it +var genRebuildStateCommand = gen.IntRange(1, 2).Map( + func(int) commands.Command { + return &rebuildStateCommand{} + }, +) + func checkSystemAndModelContent(cmdState commands.State, res commands.Result) *gopter.PropResult { model := cmdState.(*stakersStorageModel) sys := res.(*sysUnderTest) diff --git a/vms/platformvm/state/stakers_properties_test.go b/vms/platformvm/state/stakers_properties_test.go index a783d2ce7e43..1fcf00142796 100644 --- a/vms/platformvm/state/stakers_properties_test.go +++ b/vms/platformvm/state/stakers_properties_test.go @@ -15,8 +15,11 @@ import ( "github.com/leanovate/gopter/prop" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -28,7 +31,9 @@ import ( func TestGeneralStakerContainersProperties(t *testing.T) { storeCreators := map[string]func() (Stakers, error){ "base state": func() (Stakers, error) { - return buildChainState(nil) + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + baseDB := versiondb.New(baseDBManager.Current().Database) + return buildChainState(baseDB, nil) }, "diff": func() (Stakers, error) { diff, _, err := buildDiffOnTopOfBaseState(nil) @@ -491,7 +496,9 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g } func buildDiffOnTopOfBaseState(trackedSubnets []ids.ID) (Diff, State, error) { - baseState, err := buildChainState(trackedSubnets) + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + baseDB := versiondb.New(baseDBManager.Current().Database) + baseState, err := buildChainState(baseDB, trackedSubnets) if err != nil { return nil, nil, fmt.Errorf("unexpected error while creating chain base state, err %v", err) } From 79f78c229911970c2b6b457d323c19b749f13ae2 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 28 Jun 2023 19:22:14 +0200 Subject: [PATCH 044/132] nits --- vms/platformvm/state/stakers_helpers_test.go | 2 +- .../stakers_model_generator_check_test.go | 28 ++++++++++++++++--- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index 532356fbf51c..8a7160e497dd 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -107,7 +107,7 @@ func defaultConfig() *config.Config { RewardConfig: reward.Config{ MaxConsumptionRate: .12 * reward.PercentDenominator, MinConsumptionRate: .10 * reward.PercentDenominator, - MintingPeriod: 365 * 24 * time.Hour, + MintingPeriod: defaultMaxStakingDuration, SupplyCap: 720 * units.MegaAvax, }, ApricotPhase3Time: defaultValidateEndTime, diff --git a/vms/platformvm/state/stakers_model_generator_check_test.go b/vms/platformvm/state/stakers_model_generator_check_test.go index f8b6468539ad..e43c95c5cbc0 100644 --- a/vms/platformvm/state/stakers_model_generator_check_test.go +++ b/vms/platformvm/state/stakers_model_generator_check_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - "github.com/ethereum/go-ethereum/common/math" "github.com/leanovate/gopter" "github.com/leanovate/gopter/prop" @@ -23,13 +22,14 @@ var ( ) // TestGeneratedStakersValidity tests the staker generator itself. 
-// It documents and verifies theinvariants enforced by the staker generator. +// It documents and verifies the invariants enforced by the staker generator. func TestGeneratedStakersValidity(t *testing.T) { properties := gopter.NewProperties(nil) ctx := buildStateCtx() subnetID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() + maxDelegatorWeight := uint64(2023) properties.Property("AddValidatorTx generator checks", prop.ForAll( func(nonInitTx *txs.Tx) string { @@ -106,6 +106,11 @@ func TestGeneratedStakersValidity(t *testing.T) { currentDel.StartTime, currentDel.EndTime, currentDel) } + if currentDel.Weight > maxDelegatorWeight { + return fmt.Sprintf("delegator weight %v above maximum %v, staker %v", + currentDel.Weight, maxDelegatorWeight, currentDel) + } + pendingDel, err := NewPendingStaker(signedTx.ID(), addDelTx) if err != nil { return err.Error() @@ -116,9 +121,14 @@ func TestGeneratedStakersValidity(t *testing.T) { pendingDel.StartTime, pendingDel.EndTime, pendingDel) } + if pendingDel.Weight > maxDelegatorWeight { + return fmt.Sprintf("delegator weight %v above maximum %v, staker %v", + pendingDel.Weight, maxDelegatorWeight, pendingDel) + } + return "" }, - addDelegatorTxGenerator(ctx, &nodeID, math.MaxUint64), + addDelegatorTxGenerator(ctx, &nodeID, maxDelegatorWeight), )) properties.Property("addPermissionlessValidatorTx generator checks", prop.ForAll( @@ -204,6 +214,11 @@ func TestGeneratedStakersValidity(t *testing.T) { currentDel.StartTime, currentDel.EndTime, currentDel) } + if currentDel.Weight > maxDelegatorWeight { + return fmt.Sprintf("delegator weight %v above maximum %v, staker %v", + currentDel.Weight, maxDelegatorWeight, currentDel) + } + pendingDel, err := NewPendingStaker(signedTx.ID(), addDelTx) if err != nil { return err.Error() @@ -214,9 +229,14 @@ func TestGeneratedStakersValidity(t *testing.T) { pendingDel.StartTime, pendingDel.EndTime, pendingDel) } + if pendingDel.Weight > maxDelegatorWeight { + return fmt.Sprintf("delegator weight %v above maximum %v, staker %v", + pendingDel.Weight, maxDelegatorWeight, pendingDel) + } + return "" }, - addPermissionlessDelegatorTxGenerator(ctx, &subnetID, &nodeID, math.MaxUint64), + addPermissionlessDelegatorTxGenerator(ctx, &subnetID, &nodeID, maxDelegatorWeight), )) properties.TestingRun(t) From afb6296df190599539e23fa0e67b93677297c946 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 5 Jul 2023 13:54:37 +0200 Subject: [PATCH 045/132] skewed stakers property tests towards primary network + more fixes --- vms/platformvm/state/masked_iterator_test.go | 10 +- vms/platformvm/state/staker_test.go | 2 +- vms/platformvm/state/stakers.go | 24 +- .../stakers_model_generator_check_test.go | 6 +- .../state/stakers_model_generator_test.go | 68 ++-- vms/platformvm/state/stakers_model_storage.go | 8 +- .../state/stakers_model_storage_test.go | 336 +++++++++++++----- .../state/stakers_properties_test.go | 12 +- 8 files changed, 336 insertions(+), 130 deletions(-) diff --git a/vms/platformvm/state/masked_iterator_test.go b/vms/platformvm/state/masked_iterator_test.go index 8ba719d3e732..a3c43818d3b4 100644 --- a/vms/platformvm/state/masked_iterator_test.go +++ b/vms/platformvm/state/masked_iterator_test.go @@ -17,19 +17,23 @@ func TestMaskedIterator(t *testing.T) { stakers := []*Staker{ { TxID: ids.GenerateTestID(), + Weight: 0, // just to simplify debugging NextTime: time.Unix(0, 0), }, { TxID: ids.GenerateTestID(), - NextTime: time.Unix(1, 0), + Weight: 10, // just to simplify debugging + NextTime: time.Unix(10, 0), 
}, { TxID: ids.GenerateTestID(), - NextTime: time.Unix(2, 0), + Weight: 20, // just to simplify debugging + NextTime: time.Unix(20, 0), }, { TxID: ids.GenerateTestID(), - NextTime: time.Unix(3, 0), + Weight: 30, // just to simplify debugging + NextTime: time.Unix(30, 0), }, } maskedStakers := map[ids.ID]*Staker{ diff --git a/vms/platformvm/state/staker_test.go b/vms/platformvm/state/staker_test.go index bb196e846132..a32bc60039ac 100644 --- a/vms/platformvm/state/staker_test.go +++ b/vms/platformvm/state/staker_test.go @@ -145,7 +145,7 @@ func TestNewCurrentStaker(t *testing.T) { subnetID := ids.GenerateTestID() weight := uint64(12345) startTime := time.Now() - endTime := time.Now() + endTime := startTime.Add(time.Hour) potentialReward := uint64(54321) currentPriority := txs.SubnetPermissionedValidatorCurrentPriority diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index 2113979cb42f..bb202a640bb6 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -148,8 +148,13 @@ func (v *baseStakers) DeleteValidator(staker *Staker) { v.pruneValidator(staker.SubnetID, staker.NodeID) validatorDiff := v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) - validatorDiff.validatorStatus = deleted - validatorDiff.validator = staker + if validatorDiff.validatorStatus == added { + validatorDiff.validatorStatus = unmodified + validatorDiff.validator = nil + } else { + validatorDiff.validatorStatus = deleted + validatorDiff.validator = staker + } v.stakers.Delete(staker) } @@ -190,10 +195,19 @@ func (v *baseStakers) DeleteDelegator(staker *Staker) { v.pruneValidator(staker.SubnetID, staker.NodeID) validatorDiff := v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) - if validatorDiff.deletedDelegators == nil { - validatorDiff.deletedDelegators = make(map[ids.ID]*Staker) + found := false + if validatorDiff.addedDelegators != nil { + if _, found = validatorDiff.addedDelegators.Get(staker); found { + // delegator to be removed was just added. 
Wipe it up here + validatorDiff.addedDelegators.Delete(staker) + } + } + if !found { + if validatorDiff.deletedDelegators == nil { + validatorDiff.deletedDelegators = make(map[ids.ID]*Staker) + } + validatorDiff.deletedDelegators[staker.TxID] = staker } - validatorDiff.deletedDelegators[staker.TxID] = staker v.stakers.Delete(staker) } diff --git a/vms/platformvm/state/stakers_model_generator_check_test.go b/vms/platformvm/state/stakers_model_generator_check_test.go index e43c95c5cbc0..8c841c779e5b 100644 --- a/vms/platformvm/state/stakers_model_generator_check_test.go +++ b/vms/platformvm/state/stakers_model_generator_check_test.go @@ -6,13 +6,13 @@ package state import ( "errors" "fmt" + "math" "testing" "github.com/leanovate/gopter" "github.com/leanovate/gopter/prop" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -73,7 +73,7 @@ func TestGeneratedStakersValidity(t *testing.T) { return "" }, - addValidatorTxGenerator(ctx, &nodeID), + addValidatorTxGenerator(ctx, &nodeID, math.MaxUint64), )) properties.Property("AddDelegatorTx generator checks", prop.ForAll( @@ -177,7 +177,7 @@ func TestGeneratedStakersValidity(t *testing.T) { return "" }, - addPermissionlessValidatorTxGenerator(ctx, &subnetID, &nodeID, &signer.Empty{}), + addPermissionlessValidatorTxGenerator(ctx, &subnetID, &nodeID, math.MaxUint64), )) properties.Property("addPermissionlessDelegatorTx generator checks", prop.ForAll( diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go index 3a9dc1ab0140..add9810069b2 100644 --- a/vms/platformvm/state/stakers_model_generator_test.go +++ b/vms/platformvm/state/stakers_model_generator_test.go @@ -5,7 +5,6 @@ package state import ( "fmt" - "math" "reflect" "time" @@ -14,11 +13,14 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + blst "github.com/supranational/blst/bindings/go" ) type generatorPriorityType uint8 @@ -44,16 +46,15 @@ func stakerTxGenerator( priority generatorPriorityType, subnetID *ids.ID, nodeID *ids.NodeID, - blsSigner signer.Signer, maxWeight uint64, // helps avoiding overflows in delegator tests ) gopter.Gen { switch priority { case permissionedValidator: - return addValidatorTxGenerator(ctx, nodeID) + return addValidatorTxGenerator(ctx, nodeID, maxWeight) case permissionedDelegator: return addDelegatorTxGenerator(ctx, nodeID, maxWeight) case permissionlessValidator: - return addPermissionlessValidatorTxGenerator(ctx, subnetID, nodeID, blsSigner) + return addPermissionlessValidatorTxGenerator(ctx, subnetID, nodeID, maxWeight) case permissionlessDelegator: return addPermissionlessDelegatorTxGenerator(ctx, subnetID, nodeID, maxWeight) default: @@ -65,14 +66,26 @@ func addPermissionlessValidatorTxGenerator( ctx *snow.Context, subnetID *ids.ID, nodeID *ids.NodeID, - blsSigner signer.Signer, + maxWeight uint64, ) gopter.Gen { - return stakerDataGenerator(nodeID, math.MaxUint64).FlatMap( + return stakerDataGenerator(nodeID, maxWeight).FlatMap( func(v interface{}) gopter.Gen { - genStakerSubnetID := genID + genStakerSubnetID := subnetIDGen if subnetID 
!= nil { genStakerSubnetID = gen.Const(*subnetID) } + + // always return a non-empty bls key here. Will drop it + // below, in txs.Tx generator if needed. + fullBlsKeyGen := gen.SliceOfN(32, gen.UInt8()).FlatMap( + func(v interface{}) gopter.Gen { + bytes := v.([]byte) + sk1 := blst.KeyGen(bytes) + return gen.Const(signer.NewProofOfPossession(sk1)) + }, + reflect.TypeOf(&signer.ProofOfPossession{}), + ) + stakerData := v.(txs.Validator) specificGen := gen.StructPtr(reflect.TypeOf(&txs.AddPermissionlessValidatorTx{}), map[string]gopter.Gen{ @@ -86,7 +99,7 @@ func addPermissionlessValidatorTxGenerator( }), "Validator": gen.Const(stakerData), "Subnet": genStakerSubnetID, - "Signer": gen.Const(blsSigner), + "Signer": fullBlsKeyGen, "StakeOuts": gen.Const([]*avax.TransferableOutput{ { Asset: avax.Asset{ @@ -114,6 +127,11 @@ func addPermissionlessValidatorTxGenerator( func(v interface{}) gopter.Gen { stakerTx := v.(*txs.AddPermissionlessValidatorTx) + // drop Signer if needed + if stakerTx.Subnet != constants.PlatformChainID { + stakerTx.Signer = &signer.Empty{} + } + if err := stakerTx.SyntacticVerify(ctx); err != nil { panic(fmt.Errorf("failed syntax verification in tx generator, %w", err)) } @@ -135,8 +153,9 @@ func addPermissionlessValidatorTxGenerator( func addValidatorTxGenerator( ctx *snow.Context, nodeID *ids.NodeID, + maxWeight uint64, ) gopter.Gen { - return stakerDataGenerator(nodeID, math.MaxUint64).FlatMap( + return stakerDataGenerator(nodeID, maxWeight).FlatMap( func(v interface{}) gopter.Gen { stakerData := v.(txs.Validator) @@ -198,7 +217,7 @@ func addPermissionlessDelegatorTxGenerator( ) gopter.Gen { return stakerDataGenerator(nodeID, maxWeight).FlatMap( func(v interface{}) gopter.Gen { - genStakerSubnetID := genID + genStakerSubnetID := subnetIDGen if subnetID != nil { genStakerSubnetID = gen.Const(*subnetID) } @@ -345,7 +364,7 @@ type stakerTimeData struct { func genStakerTimeData() gopter.Gen { return gen.Struct(reflect.TypeOf(&stakerTimeData{}), map[string]gopter.Gen{ "StartTime": gen.Time(), - "Duration": gen.Int64Range(1, 365*24), + "Duration": gen.Int64Range(int64(time.Hour), int64(365*24*time.Hour)), }) } @@ -354,16 +373,25 @@ const ( lengthNodeID = 20 ) -// genID is the helper generator for ids.ID objects -var genID = gen.SliceOfN(lengthID, gen.UInt8()).FlatMap( - func(v interface{}) gopter.Gen { - byteSlice := v.([]byte) - var byteArray [lengthID]byte - copy(byteArray[:], byteSlice) - return gen.Const(ids.ID(byteArray)) +// subnetIDGen is the helper generator for subnetID, duly skewed towards primary network +var subnetIDGen = gen.Weighted([]gen.WeightedGen{ + { + Weight: 50, + Gen: gen.Const(constants.PrimaryNetworkID), }, - reflect.TypeOf([]byte{}), -) + { + Weight: 50, + Gen: gen.SliceOfN(lengthID, gen.UInt8()).FlatMap( + func(v interface{}) gopter.Gen { + byteSlice := v.([]byte) + var byteArray [lengthID]byte + copy(byteArray[:], byteSlice) + return gen.Const(ids.ID(byteArray)) + }, + reflect.TypeOf([]byte{}), + ), + }, +}) // genNodeID is the helper generator for ids.NodeID objects var genNodeID = gen.SliceOfN(lengthNodeID, gen.UInt8()).FlatMap( diff --git a/vms/platformvm/state/stakers_model_storage.go b/vms/platformvm/state/stakers_model_storage.go index f7f3cdc6def6..0e4d456581d4 100644 --- a/vms/platformvm/state/stakers_model_storage.go +++ b/vms/platformvm/state/stakers_model_storage.go @@ -146,12 +146,12 @@ func putDelegator(staker *Staker, domain map[subnetNodeKey]map[ids.ID]*Staker) { nodeID: staker.NodeID, } - ls, found := domain[key] + dels, found := 
domain[key] if !found { - ls = make(map[ids.ID]*Staker) - domain[key] = ls + dels = make(map[ids.ID]*Staker) + domain[key] = dels } - ls[staker.TxID] = staker + dels[staker.TxID] = staker } func (m *stakersStorageModel) DeleteCurrentDelegator(staker *Staker) { diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index 68460be8b86c..ec46adc79dec 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -17,8 +17,8 @@ import ( "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -82,7 +82,7 @@ func (s *sysUnderTest) GetState(blkID ids.ID) (Chain, bool) { return s.baseState, blkID == s.baseState.GetLastAccepted() } -func (s *sysUnderTest) addDiffOnTop() { +func (s *sysUnderTest) addDiffOnTop() error { newTopBlkID := ids.Empty.Prefix(atomic.AddUint64(&s.diffBlkIDSeed, 1)) var topBlkID ids.ID if len(s.sortedDiffIDs) == 0 { @@ -92,10 +92,11 @@ func (s *sysUnderTest) addDiffOnTop() { } newTopDiff, err := NewDiff(topBlkID, s) if err != nil { - panic(err) + return err } s.sortedDiffIDs = append(s.sortedDiffIDs, newTopBlkID) s.diffsMap[newTopBlkID] = newTopDiff + return nil } // getTopChainState returns top diff or baseState @@ -112,22 +113,22 @@ func (s *sysUnderTest) getTopChainState() Chain { } // flushBottomDiff applies bottom diff if available -func (s *sysUnderTest) flushBottomDiff() bool { +func (s *sysUnderTest) flushBottomDiff() (bool, error) { if len(s.sortedDiffIDs) == 0 { - return false + return false, nil } bottomDiffID := s.sortedDiffIDs[0] diffToApply := s.diffsMap[bottomDiffID] err := diffToApply.Apply(s.baseState) if err != nil { - panic(err) + return true, err } s.baseState.SetLastAccepted(bottomDiffID) s.sortedDiffIDs = s.sortedDiffIDs[1:] delete(s.diffsMap, bottomDiffID) - return true + return true, nil } // stakersCommands creates/destroy the system under test and generates @@ -208,7 +209,8 @@ func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands. sTx := (*txs.Tx)(v) sys := sut.(*sysUnderTest) - currentVal, err := NewCurrentStaker(sTx.ID(), sTx.Unsigned.(txs.Staker), uint64(1000)) + stakerTx := sTx.Unsigned.(txs.StakerTx) + currentVal, err := NewCurrentStaker(sTx.ID(), stakerTx, uint64(1000)) if err != nil { return sys // state checks later on should spot missing validator } @@ -221,7 +223,8 @@ func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands. 
func (v *putCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { sTx := (*txs.Tx)(v) - currentVal, err := NewCurrentStaker(sTx.ID(), sTx.Unsigned.(txs.Staker), uint64(1000)) + stakerTx := sTx.Unsigned.(txs.StakerTx) + currentVal, err := NewCurrentStaker(sTx.ID(), stakerTx, uint64(1000)) if err != nil { return cmdState // state checks later on should spot missing validator } @@ -236,7 +239,11 @@ func (*putCurrentValidatorCommand) PreCondition(commands.State) bool { } func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - return checkSystemAndModelContent(cmdState, res) + if !checkSystemAndModelContent(cmdState, res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + return &gopter.PropResult{Status: gopter.PropTrue} } func (v *putCurrentValidatorCommand) String() string { @@ -251,7 +258,7 @@ func (v *putCurrentValidatorCommand) String() string { ) } -var genPutCurrentValidatorCommand = addPermissionlessValidatorTxGenerator(commandsCtx, nil, nil, &signer.Empty{}).Map( +var genPutCurrentValidatorCommand = addPermissionlessValidatorTxGenerator(commandsCtx, nil, nil, 1000).Map( func(nonInitTx *txs.Tx) commands.Command { sTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) if err != nil { @@ -264,16 +271,19 @@ var genPutCurrentValidatorCommand = addPermissionlessValidatorTxGenerator(comman ) // DeleteCurrentValidator section -type deleteCurrentValidatorCommand struct{} +type deleteCurrentValidatorCommand struct { + err error +} -func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - // delete first validator, if any +func (cmd *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { + // delete first validator without delegators, if any sys := sut.(*sysUnderTest) topDiff := sys.getTopChainState() stakerIt, err := topDiff.GetCurrentStakerIterator() if err != nil { - panic(err) + cmd.err = err + return sys } var ( @@ -282,13 +292,23 @@ func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands ) for !found && stakerIt.Next() { validator = stakerIt.Value() - if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority || - validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || - validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { - found = true - break + if validator.Priority.IsCurrentValidator() { + // check validators has no delegators + delIt, err := topDiff.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) + if err != nil { + cmd.err = err + stakerIt.Release() + return sys + } + if !delIt.Next() { + found = true + break + } else { + continue + } } } + if !found { stakerIt.Release() return sys // no current validator to delete @@ -299,11 +319,13 @@ func (*deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands return sys // returns sys to allow comparison with state in PostCondition } -func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { +func (cmd *deleteCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { + // delete first validator without delegators, if any model := cmdState.(*stakersStorageModel) stakerIt, err := model.GetCurrentStakerIterator() if err != nil { - return err + cmd.err = err + return model } var ( @@ -312,11 +334,20 @@ func (*deleteCurrentValidatorCommand) NextState(cmdState commands.State) command ) for !found && stakerIt.Next() { validator = 
stakerIt.Value() - if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority || - validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || - validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { - found = true - break + if validator.Priority.IsCurrentValidator() { + // check validators has no delegators + delIt, err := model.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) + if err != nil { + cmd.err = err + stakerIt.Release() + return model + } + if !delIt.Next() { + found = true + break + } else { + continue + } } } if !found { @@ -334,8 +365,17 @@ func (*deleteCurrentValidatorCommand) PreCondition(commands.State) bool { return true } -func (*deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - return checkSystemAndModelContent(cmdState, res) +func (cmd *deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if cmd.err != nil { + cmd.err = nil // reset for next runs + return &gopter.PropResult{Status: gopter.PropFalse} + } + + if !checkSystemAndModelContent(cmdState, res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + return &gopter.PropResult{Status: gopter.PropTrue} } func (*deleteCurrentValidatorCommand) String() string { @@ -351,14 +391,17 @@ var genDeleteCurrentValidatorCommand = gen.IntRange(1, 2).Map( ) // PutCurrentDelegator section -type putCurrentDelegatorCommand txs.Tx +type putCurrentDelegatorCommand struct { + sTx txs.Tx + err error +} func (v *putCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - candidateDelegator := (*txs.Tx)(v) + candidateDelegator := v.sTx sys := sut.(*sysUnderTest) err := addCurrentDelegatorInSystem(sys, candidateDelegator.Unsigned) if err != nil { - panic(err) + v.err = err } return sys } @@ -381,9 +424,7 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegatorTx txs.Uns ) for !found && stakerIt.Next() { validator = stakerIt.Value() - if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority || - validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || - validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { + if validator.Priority.IsCurrentValidator() { found = true break } @@ -415,11 +456,11 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegatorTx txs.Uns } func (v *putCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { - candidateDelegator := (*txs.Tx)(v) + candidateDelegator := v.sTx model := cmdState.(*stakersStorageModel) err := addCurrentDelegatorInModel(model, candidateDelegator.Unsigned) if err != nil { - panic(err) + v.err = err } return cmdState } @@ -441,9 +482,7 @@ func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegatorTx ) for !found && stakerIt.Next() { validator = stakerIt.Value() - if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority || - validator.Priority == txs.SubnetPermissionlessValidatorCurrentPriority || - validator.Priority == txs.PrimaryNetworkValidatorCurrentPriority { + if validator.Priority.IsCurrentValidator() { found = true break } @@ -477,16 +516,25 @@ func (*putCurrentDelegatorCommand) PreCondition(commands.State) bool { return true } -func (*putCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - return checkSystemAndModelContent(cmdState, res) +func (v *putCurrentDelegatorCommand) 
PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if v.err != nil { + v.err = nil // reset for next runs + return &gopter.PropResult{Status: gopter.PropFalse} + } + + if !checkSystemAndModelContent(cmdState, res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + return &gopter.PropResult{Status: gopter.PropTrue} } func (v *putCurrentDelegatorCommand) String() string { - stakerTx := v.Unsigned.(txs.StakerTx) - return fmt.Sprintf("putCurrentDelegator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", + stakerTx := v.sTx.Unsigned.(txs.StakerTx) + return fmt.Sprintf("PutCurrentDelegator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", stakerTx.SubnetID(), stakerTx.NodeID(), - v.TxID, + v.sTx.TxID, stakerTx.CurrentPriority(), stakerTx.StartTime().Unix(), stakerTx.EndTime().Sub(stakerTx.StartTime())) @@ -499,22 +547,27 @@ var genPutCurrentDelegatorCommand = addPermissionlessDelegatorTxGenerator(comman panic(fmt.Errorf("failed signing tx, %w", err)) } - cmd := (*putCurrentDelegatorCommand)(sTx) + cmd := &putCurrentDelegatorCommand{ + sTx: *sTx, + } return cmd }, ) // DeleteCurrentDelegator section -type deleteCurrentDelegatorCommand struct{} +type deleteCurrentDelegatorCommand struct { + err error +} -func (*deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - // delete first validator, if any +func (cmd *deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { + // delete first delegator, if any sys := sut.(*sysUnderTest) topDiff := sys.getTopChainState() stakerIt, err := topDiff.GetCurrentStakerIterator() if err != nil { - panic(err) + cmd.err = err + return sys } var ( @@ -523,8 +576,7 @@ func (*deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands ) for !found && stakerIt.Next() { delegator = stakerIt.Value() - if delegator.Priority == txs.SubnetPermissionlessDelegatorCurrentPriority || - delegator.Priority == txs.PrimaryNetworkDelegatorCurrentPriority { + if delegator.Priority.IsCurrentDelegator() { found = true break } @@ -552,8 +604,7 @@ func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) command ) for !found && stakerIt.Next() { delegator = stakerIt.Value() - if delegator.Priority == txs.SubnetPermissionlessDelegatorCurrentPriority || - delegator.Priority == txs.PrimaryNetworkDelegatorCurrentPriority { + if delegator.Priority.IsCurrentDelegator() { found = true break } @@ -572,8 +623,17 @@ func (*deleteCurrentDelegatorCommand) PreCondition(commands.State) bool { return true } -func (*deleteCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - return checkSystemAndModelContent(cmdState, res) +func (cmd *deleteCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if cmd.err != nil { + cmd.err = nil // reset for next runs + return &gopter.PropResult{Status: gopter.PropFalse} + } + + if !checkSystemAndModelContent(cmdState, res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + return &gopter.PropResult{Status: gopter.PropTrue} } func (*deleteCurrentDelegatorCommand) String() string { @@ -589,11 +649,16 @@ var genDeleteCurrentDelegatorCommand = gen.IntRange(1, 2).Map( ) // addTopDiffCommand section -type addTopDiffCommand struct{} +type addTopDiffCommand struct { + err error +} -func (*addTopDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { +func 
(cmd *addTopDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { sys := sut.(*sysUnderTest) - sys.addDiffOnTop() + err := sys.addDiffOnTop() + if err != nil { + cmd.err = err + } return sys } @@ -605,8 +670,17 @@ func (*addTopDiffCommand) PreCondition(commands.State) bool { return true } -func (*addTopDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - return checkSystemAndModelContent(cmdState, res) +func (cmd *addTopDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if cmd.err != nil { + cmd.err = nil // reset for next runs + return &gopter.PropResult{Status: gopter.PropFalse} + } + + if !checkSystemAndModelContent(cmdState, res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + return &gopter.PropResult{Status: gopter.PropTrue} } func (*addTopDiffCommand) String() string { @@ -622,11 +696,14 @@ var genAddTopDiffCommand = gen.IntRange(1, 2).Map( ) // applyBottomDiffCommand section -type applyBottomDiffCommand struct{} +type applyBottomDiffCommand struct { + err error +} -func (*applyBottomDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { +func (cmd *applyBottomDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { sys := sut.(*sysUnderTest) - _ = sys.flushBottomDiff() + _, cmd.err = sys.flushBottomDiff() + return sys } @@ -638,8 +715,17 @@ func (*applyBottomDiffCommand) PreCondition(commands.State) bool { return true } -func (*applyBottomDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - return checkSystemAndModelContent(cmdState, res) +func (cmd *applyBottomDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if cmd.err != nil { + cmd.err = nil // reset for next runs + return &gopter.PropResult{Status: gopter.PropFalse} + } + + if !checkSystemAndModelContent(cmdState, res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + return &gopter.PropResult{Status: gopter.PropTrue} } func (*applyBottomDiffCommand) String() string { @@ -655,13 +741,16 @@ var genApplyBottomDiffCommand = gen.IntRange(1, 2).Map( ) // commitBottomStateCommand section -type commitBottomStateCommand struct{} +type commitBottomStateCommand struct { + err error +} -func (*commitBottomStateCommand) Run(sut commands.SystemUnderTest) commands.Result { +func (cmd *commitBottomStateCommand) Run(sut commands.SystemUnderTest) commands.Result { sys := sut.(*sysUnderTest) err := sys.baseState.Commit() if err != nil { - panic(err) + cmd.err = err + return sys } return sys } @@ -674,8 +763,17 @@ func (*commitBottomStateCommand) PreCondition(commands.State) bool { return true } -func (*commitBottomStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - return checkSystemAndModelContent(cmdState, res) +func (cmd *commitBottomStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if cmd.err != nil { + cmd.err = nil // reset for next runs + return &gopter.PropResult{Status: gopter.PropFalse} + } + + if !checkSystemAndModelContent(cmdState, res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + return &gopter.PropResult{Status: gopter.PropTrue} } func (*commitBottomStateCommand) String() string { @@ -691,27 +789,40 @@ var genCommitBottomStateCommand = gen.IntRange(1, 2).Map( ) // rebuildStateCommand section -type rebuildStateCommand struct{} +type rebuildStateCommand struct { + err error +} -func 
(*rebuildStateCommand) Run(sut commands.SystemUnderTest) commands.Result { +func (cmd *rebuildStateCommand) Run(sut commands.SystemUnderTest) commands.Result { sys := sut.(*sysUnderTest) // 1. Persist all outstanding changes - for sys.flushBottomDiff() { - err := sys.baseState.Commit() + for { + diffFound, err := sys.flushBottomDiff() if err != nil { - panic(err) + cmd.err = err + return sys + } + if !diffFound { + break + } + + if err := sys.baseState.Commit(); err != nil { + cmd.err = err + return sys } } if err := sys.baseState.Commit(); err != nil { - panic(err) + cmd.err = err + return sys } // 2. Rebuild the state from the db baseState, err := buildChainState(sys.baseDB, nil) if err != nil { - panic(err) + cmd.err = err + return sys } sys.baseState = baseState sys.diffsMap = map[ids.ID]Diff{} @@ -728,8 +839,21 @@ func (*rebuildStateCommand) PreCondition(commands.State) bool { return true } -func (*rebuildStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - return checkSystemAndModelContent(cmdState, res) +func (cmd *rebuildStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if cmd.err != nil { + cmd.err = nil // reset for next runs + return &gopter.PropResult{Status: gopter.PropFalse} + } + + if !checkSystemAndModelContent(cmdState, res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + if !checkValidatorSetContent(res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + + return &gopter.PropResult{Status: gopter.PropTrue} } func (*rebuildStateCommand) String() string { @@ -744,7 +868,7 @@ var genRebuildStateCommand = gen.IntRange(1, 2).Map( }, ) -func checkSystemAndModelContent(cmdState commands.State, res commands.Result) *gopter.PropResult { +func checkSystemAndModelContent(cmdState commands.State, res commands.Result) bool { model := cmdState.(*stakersStorageModel) sys := res.(*sysUnderTest) @@ -753,11 +877,11 @@ func checkSystemAndModelContent(cmdState commands.State, res commands.Result) *g modelIt, err := model.GetCurrentStakerIterator() if err != nil { - return &gopter.PropResult{Status: gopter.PropFalse} + return false } sysIt, err := topDiff.GetCurrentStakerIterator() if err != nil { - return &gopter.PropResult{Status: gopter.PropFalse} + return false } modelStakers := make([]*Staker, 0) @@ -773,15 +897,55 @@ func checkSystemAndModelContent(cmdState commands.State, res commands.Result) *g sysIt.Release() if len(modelStakers) != len(sysStakers) { - return &gopter.PropResult{Status: gopter.PropFalse} + return false } for idx, modelStaker := range modelStakers { sysStaker := sysStakers[idx] if modelStaker == nil || sysStaker == nil || !reflect.DeepEqual(modelStaker, sysStaker) { - return &gopter.PropResult{Status: gopter.PropFalse} + return false } } - return &gopter.PropResult{Status: gopter.PropTrue} + return true +} + +func checkValidatorSetContent(res commands.Result) bool { + sys := res.(*sysUnderTest) + valSet := sys.baseState.(*state).cfg.Validators + + sysIt, err := sys.baseState.GetCurrentStakerIterator() + if err != nil { + return false + } + + // valContent subnetID -> nodeID -> aggregate weight (validator's own weight + delegators' weight) + valContent := make(map[ids.ID]map[ids.NodeID]uint64) + for sysIt.Next() { + val := sysIt.Value() + if val.SubnetID != constants.PrimaryNetworkID { + continue + } + nodes, found := valContent[val.SubnetID] + if !found { + nodes = make(map[ids.NodeID]uint64) + valContent[val.SubnetID] = nodes + } + nodes[val.NodeID] += 
val.Weight + } + sysIt.Release() + + for subnetID, nodes := range valContent { + vals, found := valSet.Get(subnetID) + if !found { + return false + } + for nodeID, weight := range nodes { + valWeight := vals.GetWeight(nodeID) + if weight != valWeight { + return false + } + } + } + return true } diff --git a/vms/platformvm/state/stakers_properties_test.go b/vms/platformvm/state/stakers_properties_test.go index 1fcf00142796..9c565df1e3c1 100644 --- a/vms/platformvm/state/stakers_properties_test.go +++ b/vms/platformvm/state/stakers_properties_test.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -69,7 +68,8 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) } - staker, err := NewCurrentStaker(signedTx.ID(), signedTx.Unsigned.(txs.StakerTx), uint64(100)) + stakerTx := signedTx.Unsigned.(txs.StakerTx) + staker, err := NewCurrentStaker(signedTx.ID(), stakerTx, uint64(100)) if err != nil { return err.Error() } @@ -122,7 +122,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g return "" }, - stakerTxGenerator(ctx, permissionedValidator, &constants.PrimaryNetworkID, nil, &signer.Empty{}, math.MaxUint64), + stakerTxGenerator(ctx, permissionedValidator, &constants.PrimaryNetworkID, nil, math.MaxUint64), )) properties.Property("add, delete and query pending validators", prop.ForAll( @@ -190,7 +190,7 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g return "" }, - stakerTxGenerator(ctx, permissionedValidator, &constants.PrimaryNetworkID, nil, &signer.Empty{}, math.MaxUint64), + stakerTxGenerator(ctx, permissionedValidator, &constants.PrimaryNetworkID, nil, math.MaxUint64), )) var ( @@ -331,7 +331,6 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g permissionlessValidator, &subnetID, &nodeID, - &signer.Empty{}, math.MaxUint64, ), gen.SliceOfN(10, @@ -339,7 +338,6 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g permissionlessDelegator, &subnetID, &nodeID, - &signer.Empty{}, 1000, ), ), @@ -478,7 +476,6 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g permissionlessValidator, &subnetID, &nodeID, - &signer.Empty{}, math.MaxUint64, ), gen.SliceOfN(10, @@ -486,7 +483,6 @@ func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *g permissionlessDelegator, &subnetID, &nodeID, - &signer.Empty{}, 1000, ), ), From f3281bcb8b48df963aaded1ca80f1291941d454a Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 6 Jul 2023 11:23:57 +0200 Subject: [PATCH 046/132] fixed storage ops --- .../state/stakers_model_storage_test.go | 163 ++++++++---------- 1 file changed, 70 insertions(+), 93 deletions(-) diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index ec46adc79dec..391ddf6054c9 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -30,8 +30,7 @@ var ( _ commands.Command = (*putCurrentDelegatorCommand)(nil) _ commands.Command = (*deleteCurrentDelegatorCommand)(nil) _ commands.Command = (*addTopDiffCommand)(nil) - _ commands.Command = (*applyBottomDiffCommand)(nil) - _ 
commands.Command = (*commitBottomStateCommand)(nil) + _ commands.Command = (*applyAndCommitBottomDiffCommand)(nil) _ commands.Command = (*rebuildStateCommand)(nil) commandsCtx = buildStateCtx() @@ -195,18 +194,20 @@ var stakersCommands = &commands.ProtoCommands{ genDeleteCurrentDelegatorCommand, genAddTopDiffCommand, - genApplyBottomDiffCommand, - genCommitBottomStateCommand, + genApplyAndCommitBottomDiffCommand, genRebuildStateCommand, ) }, } // PutCurrentValidator section -type putCurrentValidatorCommand txs.Tx +type putCurrentValidatorCommand struct { + sTx *txs.Tx + err error +} -func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - sTx := (*txs.Tx)(v) +func (cmd *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { + sTx := cmd.sTx sys := sut.(*sysUnderTest) stakerTx := sTx.Unsigned.(txs.StakerTx) @@ -221,8 +222,8 @@ func (v *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands. return sys } -func (v *putCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { - sTx := (*txs.Tx)(v) +func (cmd *putCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { + sTx := cmd.sTx stakerTx := sTx.Unsigned.(txs.StakerTx) currentVal, err := NewCurrentStaker(sTx.ID(), stakerTx, uint64(1000)) if err != nil { @@ -238,7 +239,12 @@ func (*putCurrentValidatorCommand) PreCondition(commands.State) bool { return true } -func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { +func (cmd *putCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if cmd.err != nil { + cmd.err = nil // reset for next runs + return &gopter.PropResult{Status: gopter.PropFalse} + } + if !checkSystemAndModelContent(cmdState, res) { return &gopter.PropResult{Status: gopter.PropFalse} } @@ -246,12 +252,12 @@ func (*putCurrentValidatorCommand) PostCondition(cmdState commands.State, res co return &gopter.PropResult{Status: gopter.PropTrue} } -func (v *putCurrentValidatorCommand) String() string { - stakerTx := v.Unsigned.(txs.StakerTx) - return fmt.Sprintf("PutCurrentValidator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", +func (cmd *putCurrentValidatorCommand) String() string { + stakerTx := cmd.sTx.Unsigned.(txs.StakerTx) + return fmt.Sprintf("\nputCurrentValidator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", stakerTx.SubnetID(), stakerTx.NodeID(), - v.TxID, + cmd.sTx.TxID, stakerTx.CurrentPriority(), stakerTx.StartTime().Unix(), stakerTx.EndTime().Sub(stakerTx.StartTime()), @@ -265,7 +271,10 @@ var genPutCurrentValidatorCommand = addPermissionlessValidatorTxGenerator(comman panic(fmt.Errorf("failed signing tx, %w", err)) } - cmd := (*putCurrentValidatorCommand)(sTx) + cmd := &putCurrentValidatorCommand{ + sTx: sTx, + err: nil, + } return cmd }, ) @@ -379,7 +388,7 @@ func (cmd *deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, } func (*deleteCurrentValidatorCommand) String() string { - return "DeleteCurrentValidator" + return "\ndeleteCurrentValidator" } // a trick to force command regeneration at each sampling. 
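Editorial note: every command in this suite follows the same gopter skeleton. Run mutates the real system under test, NextState applies the equivalent change to the in-memory model, and PostCondition compares the two. Since Run cannot fail a property directly, errors are stashed on the command and surfaced, then reset, in PostCondition. A minimal sketch of that skeleton (fooCommand and doSomething are illustrative placeholders, not part of the patch; NextState, PreCondition and String are omitted):

type fooCommand struct {
	err error
}

func (cmd *fooCommand) Run(sut commands.SystemUnderTest) commands.Result {
	sys := sut.(*sysUnderTest)
	if err := doSomething(sys); err != nil { // doSomething is hypothetical
		cmd.err = err // Run cannot fail the property; stash the error
	}
	return sys // always hand back sys so PostCondition can inspect it
}

func (cmd *fooCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult {
	if cmd.err != nil {
		cmd.err = nil // reset, the same instance may be sampled again
		return &gopter.PropResult{Status: gopter.PropFalse}
	}
	if !checkSystemAndModelContent(cmdState, res) {
		return &gopter.PropResult{Status: gopter.PropFalse}
	}
	return &gopter.PropResult{Status: gopter.PropTrue}
}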
@@ -392,16 +401,16 @@ var genDeleteCurrentValidatorCommand = gen.IntRange(1, 2).Map( // PutCurrentDelegator section type putCurrentDelegatorCommand struct { - sTx txs.Tx + sTx *txs.Tx err error } -func (v *putCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - candidateDelegator := v.sTx +func (cmd *putCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { + candidateDelegator := cmd.sTx sys := sut.(*sysUnderTest) err := addCurrentDelegatorInSystem(sys, candidateDelegator.Unsigned) if err != nil { - v.err = err + cmd.err = err } return sys } @@ -455,12 +464,12 @@ func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegatorTx txs.Uns return nil } -func (v *putCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { - candidateDelegator := v.sTx +func (cmd *putCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { + candidateDelegator := cmd.sTx model := cmdState.(*stakersStorageModel) err := addCurrentDelegatorInModel(model, candidateDelegator.Unsigned) if err != nil { - v.err = err + cmd.err = err } return cmdState } @@ -516,9 +525,9 @@ func (*putCurrentDelegatorCommand) PreCondition(commands.State) bool { return true } -func (v *putCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if v.err != nil { - v.err = nil // reset for next runs +func (cmd *putCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { + if cmd.err != nil { + cmd.err = nil // reset for next runs return &gopter.PropResult{Status: gopter.PropFalse} } @@ -529,12 +538,12 @@ func (v *putCurrentDelegatorCommand) PostCondition(cmdState commands.State, res return &gopter.PropResult{Status: gopter.PropTrue} } -func (v *putCurrentDelegatorCommand) String() string { - stakerTx := v.sTx.Unsigned.(txs.StakerTx) - return fmt.Sprintf("PutCurrentDelegator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", +func (cmd *putCurrentDelegatorCommand) String() string { + stakerTx := cmd.sTx.Unsigned.(txs.StakerTx) + return fmt.Sprintf("\nputCurrentDelegator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", stakerTx.SubnetID(), stakerTx.NodeID(), - v.sTx.TxID, + cmd.sTx.TxID, stakerTx.CurrentPriority(), stakerTx.StartTime().Unix(), stakerTx.EndTime().Sub(stakerTx.StartTime())) @@ -548,7 +557,7 @@ var genPutCurrentDelegatorCommand = addPermissionlessDelegatorTxGenerator(comman } cmd := &putCurrentDelegatorCommand{ - sTx: *sTx, + sTx: sTx, } return cmd }, @@ -562,12 +571,20 @@ type deleteCurrentDelegatorCommand struct { func (cmd *deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { // delete first delegator, if any sys := sut.(*sysUnderTest) + _, err := deleteCurrentDelegator(sys) + if err != nil { + cmd.err = err + } + return sys // returns sys to allow comparison with state in PostCondition +} + +func deleteCurrentDelegator(sys *sysUnderTest) (bool, error) { + // delete first validator, if any topDiff := sys.getTopChainState() stakerIt, err := topDiff.GetCurrentStakerIterator() if err != nil { - cmd.err = err - return sys + return false, err } var ( @@ -583,12 +600,12 @@ func (cmd *deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) comm } if !found { stakerIt.Release() - return sys // no current validator to delete + return false, nil // no current validator to delete } stakerIt.Release() // release before modifying stakers 
collection topDiff.DeleteCurrentDelegator(delegator) - return sys // returns sys to allow comparison with state in PostCondition + return true, nil } func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { @@ -637,7 +654,7 @@ func (cmd *deleteCurrentDelegatorCommand) PostCondition(cmdState commands.State, } func (*deleteCurrentDelegatorCommand) String() string { - return "DeleteCurrentDelegator" + return "\ndeleteCurrentDelegator" } // a trick to force command regeneration at each sampling. @@ -684,7 +701,7 @@ func (cmd *addTopDiffCommand) PostCondition(cmdState commands.State, res command } func (*addTopDiffCommand) String() string { - return "AddTopDiffCommand" + return "\naddTopDiffCommand" } // a trick to force command regeneration at each sampling. @@ -695,75 +712,35 @@ var genAddTopDiffCommand = gen.IntRange(1, 2).Map( }, ) -// applyBottomDiffCommand section -type applyBottomDiffCommand struct { +// applyAndCommitBottomDiffCommand section +type applyAndCommitBottomDiffCommand struct { err error } -func (cmd *applyBottomDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { +func (cmd *applyAndCommitBottomDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { sys := sut.(*sysUnderTest) - _, cmd.err = sys.flushBottomDiff() - - return sys -} - -func (*applyBottomDiffCommand) NextState(cmdState commands.State) commands.State { - return cmdState // model has no diffs -} - -func (*applyBottomDiffCommand) PreCondition(commands.State) bool { - return true -} - -func (cmd *applyBottomDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if cmd.err != nil { - cmd.err = nil // reset for next runs - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkSystemAndModelContent(cmdState, res) { - return &gopter.PropResult{Status: gopter.PropFalse} + if _, err := sys.flushBottomDiff(); err != nil { + cmd.err = err + return sys } - return &gopter.PropResult{Status: gopter.PropTrue} -} - -func (*applyBottomDiffCommand) String() string { - return "ApplyBottomDiffCommand" -} - -// a trick to force command regeneration at each sampling. 
-// gen.Const would not allow it -var genApplyBottomDiffCommand = gen.IntRange(1, 2).Map( - func(int) commands.Command { - return &applyBottomDiffCommand{} - }, -) - -// commitBottomStateCommand section -type commitBottomStateCommand struct { - err error -} - -func (cmd *commitBottomStateCommand) Run(sut commands.SystemUnderTest) commands.Result { - sys := sut.(*sysUnderTest) - err := sys.baseState.Commit() - if err != nil { + if err := sys.baseState.Commit(); err != nil { cmd.err = err return sys } + return sys } -func (*commitBottomStateCommand) NextState(cmdState commands.State) commands.State { +func (*applyAndCommitBottomDiffCommand) NextState(cmdState commands.State) commands.State { return cmdState // model has no diffs } -func (*commitBottomStateCommand) PreCondition(commands.State) bool { +func (*applyAndCommitBottomDiffCommand) PreCondition(commands.State) bool { return true } -func (cmd *commitBottomStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { +func (cmd *applyAndCommitBottomDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { if cmd.err != nil { cmd.err = nil // reset for next runs return &gopter.PropResult{Status: gopter.PropFalse} @@ -776,15 +753,15 @@ func (cmd *commitBottomStateCommand) PostCondition(cmdState commands.State, res return &gopter.PropResult{Status: gopter.PropTrue} } -func (*commitBottomStateCommand) String() string { - return "CommitBottomStateCommand" +func (*applyAndCommitBottomDiffCommand) String() string { + return "\napplyAndCommitBottomDiffCommand" } // a trick to force command regeneration at each sampling. // gen.Const would not allow it -var genCommitBottomStateCommand = gen.IntRange(1, 2).Map( +var genApplyAndCommitBottomDiffCommand = gen.IntRange(1, 2).Map( func(int) commands.Command { - return &commitBottomStateCommand{} + return &applyAndCommitBottomDiffCommand{} }, ) @@ -857,7 +834,7 @@ func (cmd *rebuildStateCommand) PostCondition(cmdState commands.State, res comma } func (*rebuildStateCommand) String() string { - return "RebuildStateCommand" + return "\nrebuildStateCommand" } // a trick to force command regeneration at each sampling. 
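Editorial note: this patch folds the separate apply and commit commands into a single applyAndCommitBottomDiffCommand, so the bottom diff can no longer be applied without being committed right away, and it prefixes every String() value with "\n", presumably so a failing command sequence prints one command per line in gopter's report. The generator idiom named in the comment above deserves spelling out. A sketch, with barCommand standing for any of the command types:

var genBarCommand = gen.IntRange(1, 2).Map(
	func(int) commands.Command {
		return &barCommand{} // a brand-new instance at every sampling
	},
)

gen.Const(&barCommand{}) would instead hand gopter the same pointer on every sample, letting state stashed on the command, such as err, leak across runs.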
From 4929d31f359f0abe5df7fda138378efed7486750 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 6 Jul 2023 11:42:25 +0200 Subject: [PATCH 047/132] nit --- .../state/stakers_model_storage_test.go | 148 +++++++++--------- 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index 391ddf6054c9..ce91bed11452 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -56,80 +56,6 @@ func TestStateAndDiffComparisonToStorageModel(t *testing.T) { properties.TestingRun(t) } -type sysUnderTest struct { - diffBlkIDSeed uint64 - baseDB database.Database - baseState State - sortedDiffIDs []ids.ID - diffsMap map[ids.ID]Diff -} - -func newSysUnderTest(baseDB database.Database, baseState State) *sysUnderTest { - sys := &sysUnderTest{ - baseDB: baseDB, - baseState: baseState, - diffsMap: map[ids.ID]Diff{}, - sortedDiffIDs: []ids.ID{}, - } - return sys -} - -func (s *sysUnderTest) GetState(blkID ids.ID) (Chain, bool) { - if state, found := s.diffsMap[blkID]; found { - return state, found - } - return s.baseState, blkID == s.baseState.GetLastAccepted() -} - -func (s *sysUnderTest) addDiffOnTop() error { - newTopBlkID := ids.Empty.Prefix(atomic.AddUint64(&s.diffBlkIDSeed, 1)) - var topBlkID ids.ID - if len(s.sortedDiffIDs) == 0 { - topBlkID = s.baseState.GetLastAccepted() - } else { - topBlkID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] - } - newTopDiff, err := NewDiff(topBlkID, s) - if err != nil { - return err - } - s.sortedDiffIDs = append(s.sortedDiffIDs, newTopBlkID) - s.diffsMap[newTopBlkID] = newTopDiff - return nil -} - -// getTopChainState returns top diff or baseState -func (s *sysUnderTest) getTopChainState() Chain { - var topChainStateID ids.ID - if len(s.sortedDiffIDs) != 0 { - topChainStateID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] - } else { - topChainStateID = s.baseState.GetLastAccepted() - } - - topChainState, _ := s.GetState(topChainStateID) - return topChainState -} - -// flushBottomDiff applies bottom diff if available -func (s *sysUnderTest) flushBottomDiff() (bool, error) { - if len(s.sortedDiffIDs) == 0 { - return false, nil - } - bottomDiffID := s.sortedDiffIDs[0] - diffToApply := s.diffsMap[bottomDiffID] - - err := diffToApply.Apply(s.baseState) - if err != nil { - return true, err - } - s.baseState.SetLastAccepted(bottomDiffID) - - s.sortedDiffIDs = s.sortedDiffIDs[1:] - delete(s.diffsMap, bottomDiffID) - return true, nil -} - // stakersCommands creates/destroy the system under test and generates // commands and initial states (stakersStorageModel) var stakersCommands = &commands.ProtoCommands{ @@ -926,3 +852,77 @@ func checkValidatorSetContent(res commands.Result) bool { } return true } + +type sysUnderTest struct { + diffBlkIDSeed uint64 + baseDB database.Database + baseState State + sortedDiffIDs []ids.ID + diffsMap map[ids.ID]Diff +} + +func newSysUnderTest(baseDB database.Database, baseState State) *sysUnderTest { + sys := &sysUnderTest{ + baseDB: baseDB, + baseState: baseState, + diffsMap: map[ids.ID]Diff{}, + sortedDiffIDs: []ids.ID{}, + } + return sys +} + +func (s *sysUnderTest) GetState(blkID ids.ID) (Chain, bool) { + if state, found := s.diffsMap[blkID]; found { + return state, found + } + return s.baseState, blkID == s.baseState.GetLastAccepted() +} + +func (s *sysUnderTest) addDiffOnTop() error { + newTopBlkID := ids.Empty.Prefix(atomic.AddUint64(&s.diffBlkIDSeed, 1)) + var topBlkID 
ids.ID + if len(s.sortedDiffIDs) == 0 { + topBlkID = s.baseState.GetLastAccepted() + } else { + topBlkID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] + } + newTopDiff, err := NewDiff(topBlkID, s) + if err != nil { + return err + } + s.sortedDiffIDs = append(s.sortedDiffIDs, newTopBlkID) + s.diffsMap[newTopBlkID] = newTopDiff + return nil +} + +// getTopChainState returns top diff or baseState +func (s *sysUnderTest) getTopChainState() Chain { + var topChainStateID ids.ID + if len(s.sortedDiffIDs) != 0 { + topChainStateID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] + } else { + topChainStateID = s.baseState.GetLastAccepted() + } + + topChainState, _ := s.GetState(topChainStateID) + return topChainState +} + +// flushBottomDiff applies bottom diff if available +func (s *sysUnderTest) flushBottomDiff() (bool, error) { + if len(s.sortedDiffIDs) == 0 { + return false, nil + } + bottomDiffID := s.sortedDiffIDs[0] + diffToApply := s.diffsMap[bottomDiffID] + + err := diffToApply.Apply(s.baseState) + if err != nil { + return true, err + } + s.baseState.SetLastAccepted(bottomDiffID) + + s.sortedDiffIDs = s.sortedDiffIDs[1:] + delete(s.diffsMap, bottomDiffID) + return true, nil +} From 1a136c5f1ebd0470c17749ea9b151e641d49a636 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 6 Jul 2023 16:20:11 +0200 Subject: [PATCH 048/132] nits --- vms/platformvm/state/diff_test.go | 27 +++++-------- vms/platformvm/state/stakers_helpers_test.go | 3 -- .../state/stakers_model_storage_test.go | 40 +++++++++++++++++-- .../state/stakers_properties_test.go | 5 +++ 4 files changed, 51 insertions(+), 24 deletions(-) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index bdde20ce055f..1bb3374ca272 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -83,8 +83,7 @@ func TestDiffCurrentValidator(t *testing.T) { lastAcceptedID := ids.GenerateTestID() state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() @@ -121,8 +120,7 @@ func TestDiffPendingValidator(t *testing.T) { lastAcceptedID := ids.GenerateTestID() state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() @@ -164,8 +162,7 @@ func TestDiffCurrentDelegator(t *testing.T) { } state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -214,8 +211,7 @@ func TestDiffPendingDelegator(t *testing.T) { } state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -258,8 +254,7 @@ func TestDiffSubnet(t *testing.T) { defer ctrl.Finish() state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) lastAcceptedID := 
ids.GenerateTestID() @@ -289,8 +284,7 @@ func TestDiffChain(t *testing.T) { defer ctrl.Finish() state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -329,8 +323,7 @@ func TestDiffTx(t *testing.T) { defer ctrl.Finish() state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -380,8 +373,7 @@ func TestDiffRewardUTXO(t *testing.T) { defer ctrl.Finish() state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -426,8 +418,7 @@ func TestDiffUTXO(t *testing.T) { defer ctrl.Finish() state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index 17f20c73cdf6..0c8a1edf5546 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -39,9 +39,6 @@ var ( defaultValidateStartTime = defaultGenesisTime defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) defaultTxFee = uint64(100) - - pending stakerStatus = 0 - current stakerStatus = 1 ) type stakerStatus int diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index ce91bed11452..f3ee8ab3971e 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -44,12 +44,18 @@ var ( // 2. applying the sequence to both our stakersStorageModel and the production-like system. // 3. checking that both stakersStorageModel and the production-like system have // the same state after each operation. - +// +// The following invariants are required for stakers state to properly work: +// 1. No stakers add/update/delete ops are performed directly on baseState, but on at least a diff +// 2. Any number of stakers ops can be carried out on a single diff +// 3. Diffs work in FIFO fashion: they are added on top of current state and only +// bottom diff is applied to base state. +// 4. The bottom diff applied to base state is immediately committed. 
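// As an editorial illustration (not part of the patch; staker is assumed to
// be a valid *Staker), a sequence honoring these invariants looks like:
//
//	sys := newSysUnderTest(baseDB, baseState)
//	_ = sys.addDiffOnTop()              // (1) mutate a diff, never baseState
//	sys.getTopChainState().PutCurrentValidator(staker)
//	flushed, _ := sys.flushBottomDiff() // (3) diffs are applied bottom-first
//	if flushed {
//		_ = sys.baseState.Commit()  // (4) commit immediately after applying
//	}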
func TestStateAndDiffComparisonToStorageModel(t *testing.T) { properties := gopter.NewProperties(nil) - // to reproduce a given scenario do something like this: - // parameters := gopter.DefaultTestParametersWithSeed(1680269995295922009) + // // to reproduce a given scenario do something like this: + // parameters := gopter.DefaultTestParametersWithSeed(1688641048828490074) // properties := gopter.NewProperties(parameters) properties.Property("state comparison to storage model", commands.Prop(stakersCommands)) @@ -136,6 +142,10 @@ func (cmd *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) command sTx := cmd.sTx sys := sut.(*sysUnderTest) + if err := sys.checkThereIsADiff(); err != nil { + return sys // state checks later on should spot missing validator + } + stakerTx := sTx.Unsigned.(txs.StakerTx) currentVal, err := NewCurrentStaker(sTx.ID(), stakerTx, uint64(1000)) if err != nil { @@ -213,6 +223,11 @@ type deleteCurrentValidatorCommand struct { func (cmd *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { // delete first validator without delegators, if any sys := sut.(*sysUnderTest) + + if err := sys.checkThereIsADiff(); err != nil { + return sys // state checks later on should spot missing validator + } + topDiff := sys.getTopChainState() stakerIt, err := topDiff.GetCurrentStakerIterator() @@ -334,6 +349,11 @@ type putCurrentDelegatorCommand struct { func (cmd *putCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { candidateDelegator := cmd.sTx sys := sut.(*sysUnderTest) + + if err := sys.checkThereIsADiff(); err != nil { + return sys // state checks later on should spot missing validator + } + err := addCurrentDelegatorInSystem(sys, candidateDelegator.Unsigned) if err != nil { cmd.err = err @@ -497,6 +517,11 @@ type deleteCurrentDelegatorCommand struct { func (cmd *deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { // delete first delegator, if any sys := sut.(*sysUnderTest) + + if err := sys.checkThereIsADiff(); err != nil { + return sys // state checks later on should spot missing validator + } + _, err := deleteCurrentDelegator(sys) if err != nil { cmd.err = err @@ -926,3 +951,12 @@ func (s *sysUnderTest) flushBottomDiff() (bool, error) { delete(s.diffsMap, bottomDiffID) return true, nil } + +// getTopChainState returns top diff or baseState +func (s *sysUnderTest) checkThereIsADiff() error { + if len(s.sortedDiffIDs) != 0 { + return nil // there is a diff + } + + return s.addDiffOnTop() +} diff --git a/vms/platformvm/state/stakers_properties_test.go b/vms/platformvm/state/stakers_properties_test.go index 9c565df1e3c1..83b3ddb6049d 100644 --- a/vms/platformvm/state/stakers_properties_test.go +++ b/vms/platformvm/state/stakers_properties_test.go @@ -23,6 +23,11 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) +const ( + pending stakerStatus = 0 + current stakerStatus = 1 +) + // TestGeneralStakerContainersProperties checks that State and Diff conform our stakersStorageModel. // TestGeneralStakerContainersProperties tests State and Diff in isolation, over simple operations. 
// TestStateAndDiffComparisonToStorageModel carries a more involved verification over a production-like From c220b515f182b410cb83ecdceb2fec93a6b986d4 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 6 Jul 2023 16:46:28 +0200 Subject: [PATCH 049/132] fixed UTs data race --- .../state/stakers_model_storage_test.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index f3ee8ab3971e..f37c7aa40af2 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -250,11 +250,13 @@ func (cmd *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) comm stakerIt.Release() return sys } - if !delIt.Next() { - found = true + + found := !delIt.Next() + delIt.Release() + if !found { break } else { - continue + continue // checks next validator } } } @@ -292,11 +294,13 @@ func (cmd *deleteCurrentValidatorCommand) NextState(cmdState commands.State) com stakerIt.Release() return model } - if !delIt.Next() { - found = true + + found := !delIt.Next() + delIt.Release() + if !found { break } else { - continue + continue // checks next validator } } } From 8fcfb76225e42797777b6e226a469ad50747449b Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 7 Jul 2023 09:21:00 +0200 Subject: [PATCH 050/132] nit --- .../state/stakers_model_storage_test.go | 103 ++++++++++++------ 1 file changed, 69 insertions(+), 34 deletions(-) diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index f37c7aa40af2..e17fbe96ac4c 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -185,6 +185,10 @@ func (cmd *putCurrentValidatorCommand) PostCondition(cmdState commands.State, re return &gopter.PropResult{Status: gopter.PropFalse} } + if !checkValidatorSetContent(res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + return &gopter.PropResult{Status: gopter.PropTrue} } @@ -240,24 +244,27 @@ func (cmd *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) comm found = false validator *Staker ) - for !found && stakerIt.Next() { + for stakerIt.Next() { validator = stakerIt.Value() - if validator.Priority.IsCurrentValidator() { - // check validators has no delegators - delIt, err := topDiff.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) - if err != nil { - cmd.err = err - stakerIt.Release() - return sys - } + if !validator.Priority.IsCurrentValidator() { + continue // checks next validator + } - found := !delIt.Next() - delIt.Release() - if !found { - break - } else { - continue // checks next validator - } + // check validator has no delegators + delIt, err := topDiff.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) + if err != nil { + cmd.err = err + stakerIt.Release() + return sys + } + + hadDelegator := delIt.Next() + delIt.Release() + if !hadDelegator { + found = true + break // found + } else { + continue // checks next validator } } @@ -277,33 +284,37 @@ func (cmd *deleteCurrentValidatorCommand) NextState(cmdState commands.State) com stakerIt, err := model.GetCurrentStakerIterator() if err != nil { cmd.err = err - return model + return cmdState } var ( found = false validator *Staker ) - for !found && stakerIt.Next() { + for stakerIt.Next() { validator = stakerIt.Value() - if validator.Priority.IsCurrentValidator() { - // 
check validators has no delegators - delIt, err := model.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) - if err != nil { - cmd.err = err - stakerIt.Release() - return model - } + if !validator.Priority.IsCurrentValidator() { + continue // checks next validator + } - found := !delIt.Next() - delIt.Release() - if !found { - break - } else { - continue // checks next validator - } + // check validator has no delegators + delIt, err := model.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) + if err != nil { + cmd.err = err + stakerIt.Release() + return cmdState + } + + hadDelegator := delIt.Next() + delIt.Release() + if !hadDelegator { + found = true + break // found + } else { + continue // checks next validator } } + if !found { stakerIt.Release() return cmdState // no current validator to add delegator to @@ -329,6 +340,10 @@ func (cmd *deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, return &gopter.PropResult{Status: gopter.PropFalse} } + if !checkValidatorSetContent(res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + return &gopter.PropResult{Status: gopter.PropTrue} } @@ -485,6 +500,10 @@ func (cmd *putCurrentDelegatorCommand) PostCondition(cmdState commands.State, re return &gopter.PropResult{Status: gopter.PropFalse} } + if !checkValidatorSetContent(res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + return &gopter.PropResult{Status: gopter.PropTrue} } @@ -605,6 +624,10 @@ func (cmd *deleteCurrentDelegatorCommand) PostCondition(cmdState commands.State, return &gopter.PropResult{Status: gopter.PropFalse} } + if !checkValidatorSetContent(res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + return &gopter.PropResult{Status: gopter.PropTrue} } @@ -652,6 +675,10 @@ func (cmd *addTopDiffCommand) PostCondition(cmdState commands.State, res command return &gopter.PropResult{Status: gopter.PropFalse} } + if !checkValidatorSetContent(res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + return &gopter.PropResult{Status: gopter.PropTrue} } @@ -705,6 +732,10 @@ func (cmd *applyAndCommitBottomDiffCommand) PostCondition(cmdState commands.Stat return &gopter.PropResult{Status: gopter.PropFalse} } + if !checkValidatorSetContent(res) { + return &gopter.PropResult{Status: gopter.PropFalse} + } + return &gopter.PropResult{Status: gopter.PropTrue} } @@ -842,6 +873,8 @@ func checkSystemAndModelContent(cmdState commands.State, res commands.Result) bo return true } +// checkValidatorSetContent compares ValidatorsSet with P-chain base-state data and +// makes sure they are coherent. func checkValidatorSetContent(res commands.Result) bool { sys := res.(*sysUnderTest) valSet := sys.baseState.(*state).cfg.Validators @@ -956,7 +989,9 @@ func (s *sysUnderTest) flushBottomDiff() (bool, error) { return true, nil } -// getTopChainState returns top diff or baseState +// checkThereIsADiff must be called before any stakers op. It makes +// sure that ops are carried out on at least a diff, as it happens +// in production code. 
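// (Editorial sketch, not part of the patch: the production flow this mirrors
// is, roughly,
//
//	diff, _ := NewDiff(lastAcceptedID, versions) // a block builds a diff on top
//	diff.PutCurrentValidator(staker)             // stakers ops hit the diff
//	_ = diff.Apply(baseState)                    // on accept, apply the bottom diff...
//	_ = baseState.Commit()                       // ...and commit immediately
//
// with lastAcceptedID, versions, staker and baseState assumed in scope.)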
 func (s *sysUnderTest) checkThereIsADiff() error {
 	if len(s.sortedDiffIDs) != 0 {
 		return nil // there is a diff
 	}
 
 	return s.addDiffOnTop()
 }

From d681698b648360b5d3323285e00d64cc2a5cd075 Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Thu, 20 Jul 2023 12:06:51 +0200
Subject: [PATCH 051/132] wip: creating pchain state based on merkleDB

---
 vms/platformvm/state/merkle_state.go | 392 +++++++++++++++++++++++++++
 1 file changed, 392 insertions(+)
 create mode 100644 vms/platformvm/state/merkle_state.go

diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go
new file mode 100644
index 000000000000..62dbe380c30d
--- /dev/null
+++ b/vms/platformvm/state/merkle_state.go
@@ -0,0 +1,392 @@
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package state
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/ava-labs/avalanchego/database"
+	"github.com/ava-labs/avalanchego/database/prefixdb"
+	"github.com/ava-labs/avalanchego/database/versiondb"
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/snow/validators"
+	"github.com/ava-labs/avalanchego/trace"
+	"github.com/ava-labs/avalanchego/utils/crypto/bls"
+	"github.com/ava-labs/avalanchego/vms/components/avax"
+	"github.com/ava-labs/avalanchego/vms/platformvm/blocks"
+	"github.com/ava-labs/avalanchego/vms/platformvm/status"
+	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
+	"github.com/ava-labs/avalanchego/x/merkledb"
+)
+
+const (
+	HistoryLength = int(256)    // from HyperSDK
+	NodeCacheSize = int(65_536) // from HyperSDK
+)
+
+var (
+	_ State = (*merkleState)(nil)
+
+	ErrNotYetImplemented = errors.New("not yet implemented")
+
+	merkleStatePrefix = []byte{0x0}
+	merkleBlockPrefix = []byte{0x1}
+)
+
+func NewMerkleState(rawDB database.Database) (Chain, error) {
+	var (
+		baseDB       = versiondb.New(rawDB)
+		baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB)
+		blockDB      = prefixdb.New(merkleBlockPrefix, baseDB)
+	)
+
+	ctx := context.TODO()
+	noOpTracer, err := trace.New(trace.Config{Enabled: false})
+	if err != nil {
+		return nil, fmt.Errorf("failed creating noOpTracer: %w", err)
+	}
+
+	merkleDB, err := merkledb.New(ctx, baseMerkleDB, merkledb.Config{
+		HistoryLength: HistoryLength,
+		NodeCacheSize: NodeCacheSize,
+		Reg:           prometheus.NewRegistry(),
+		Tracer:        noOpTracer,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed creating merkleDB: %w", err)
+	}
+
+	res := &merkleState{
+		baseDB:       baseDB,
+		baseMerkleDB: baseMerkleDB,
+		merkleDB:     merkleDB,
+		blockDB:      blockDB,
+
+		currentStakers: newBaseStakers(),
+		pendingStakers: newBaseStakers(),
+
+		ordinaryUTXOs: make(map[ids.ID]*avax.UTXO),
+		rewardUTXOs:   make(map[ids.ID][]*avax.UTXO),
+
+		supplies: make(map[ids.ID]uint64),
+
+		subnets:            make([]*txs.Tx, 0),
+		transformedSubnets: make(map[ids.ID]*txs.Tx),
+
+		chains: make(map[ids.ID][]*txs.Tx),
+
+		txs: make(map[ids.ID]*txAndStatus),
+	}
+	return res, nil
+}
+
+type merkleState struct {
+	baseDB       database.Database
+	baseMerkleDB database.Database
+	merkleDB     merkledb.MerkleDB // merkleized state
+	blockDB      database.Database // all the rest, probably just blocks
+
+	// TODO ABENEGIA: in the sections below there should often be three elements:
+	// an in-memory element to track non-committed diffs,
+	// a cache of the DB,
+	// and the DB itself.
+	// For now the in-memory diff and the cache are treated the same; I'll
+	// introduce them as soon as persistence/commit is implemented.
+
+	// stakers section (missing Delegatee piece)
+	// TODO: Consider moving delegatee to UTXOs section
+	currentStakers *baseStakers
+	pendingStakers *baseStakers
+
+	// UTXOs section
+	ordinaryUTXOs map[ids.ID]*avax.UTXO   // map of UTXO ID -> *UTXO
+	rewardUTXOs   map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO
+
+	// Metadata section
+	chainTime          time.Time
+	supplies           map[ids.ID]uint64 // map of subnetID -> current supply
+	lastAcceptedBlkID  ids.ID
+	lastAcceptedHeight uint64
+
+	// Subnets section
+	subnets            []*txs.Tx
+	transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx
+
+	// Chains section
+	chains map[ids.ID][]*txs.Tx // maps subnetID -> subnet's chains
+
+	// Txs section
+	// FIND a way to reduce use of these. No use in verification of txs,
+	// a limited window to support APIs
+	txs map[ids.ID]*txAndStatus
+
+	// Blocks section
+	blocks map[ids.ID]blocks.Block // map of blockID -> Block
+}
+
+// STAKERS section
+func (ms *merkleState) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) {
+	return ms.currentStakers.GetValidator(subnetID, nodeID)
+}
+
+func (ms *merkleState) PutCurrentValidator(staker *Staker) {
+	ms.currentStakers.PutValidator(staker)
+}
+
+func (ms *merkleState) DeleteCurrentValidator(staker *Staker) {
+	ms.currentStakers.DeleteValidator(staker)
+}
+
+func (ms *merkleState) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) {
+	return ms.currentStakers.GetDelegatorIterator(subnetID, nodeID), nil
+}
+
+func (ms *merkleState) PutCurrentDelegator(staker *Staker) {
+	ms.currentStakers.PutDelegator(staker)
+}
+
+func (ms *merkleState) DeleteCurrentDelegator(staker *Staker) {
+	ms.currentStakers.DeleteDelegator(staker)
+}
+
+func (ms *merkleState) GetCurrentStakerIterator() (StakerIterator, error) {
+	return ms.currentStakers.GetStakerIterator(), nil
+}
+
+func (ms *merkleState) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) {
+	return ms.pendingStakers.GetValidator(subnetID, nodeID)
+}
+
+func (ms *merkleState) PutPendingValidator(staker *Staker) {
+	ms.pendingStakers.PutValidator(staker)
+}
+
+func (ms *merkleState) DeletePendingValidator(staker *Staker) {
+	ms.pendingStakers.DeleteValidator(staker)
+}
+
+func (ms *merkleState) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) {
+	return ms.pendingStakers.GetDelegatorIterator(subnetID, nodeID), nil
+}
+
+func (ms *merkleState) PutPendingDelegator(staker *Staker) {
+	ms.pendingStakers.PutDelegator(staker)
+}
+
+func (ms *merkleState) DeletePendingDelegator(staker *Staker) {
+	ms.pendingStakers.DeleteDelegator(staker)
+}
+
+func (ms *merkleState) GetPendingStakerIterator() (StakerIterator, error) {
+	return ms.pendingStakers.GetStakerIterator(), nil
+}
+
+func (*merkleState) GetDelegateeReward( /*subnetID*/ ids.ID /*vdrID*/, ids.NodeID) (amount uint64, err error) {
+	return 0, ErrNotYetImplemented
+}
+
+func (*merkleState) SetDelegateeReward( /*subnetID*/ ids.ID /*vdrID*/, ids.NodeID /*amount*/, uint64) error {
+	return ErrNotYetImplemented
+}
+
+// UTXOs section
+func (ms *merkleState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) {
+	if utxo, exists := ms.ordinaryUTXOs[utxoID]; exists {
+		if utxo == nil {
+			return nil, database.ErrNotFound
+		}
+		return utxo, nil
+	}
+	return nil, fmt.Errorf("utxos not stored: %w", ErrNotYetImplemented)
+}
+
+func (*merkleState) UTXOIDs( /*addr*/ []byte /*start*/, ids.ID /*limit*/, int) ([]ids.ID, error) {
+	return nil, fmt.Errorf("utxos iteration not yet implemented: %w", ErrNotYetImplemented)
+}
+
+func (ms *merkleState) AddUTXO(utxo *avax.UTXO) {
+	ms.ordinaryUTXOs[utxo.InputID()] = utxo
+}
+
+func (ms *merkleState) DeleteUTXO(utxoID ids.ID) {
+	ms.ordinaryUTXOs[utxoID] = nil
+}
+
+func (ms *merkleState) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) {
+	if utxos, exists := ms.rewardUTXOs[txID]; exists {
+		return utxos, nil
+	}
+
+	return nil, fmt.Errorf("reward utxos not stored: %w", ErrNotYetImplemented)
+}
+
+func (ms *merkleState) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) {
+	ms.rewardUTXOs[txID] = append(ms.rewardUTXOs[txID], utxo)
+}
+
+// METADATA Section
+func (ms *merkleState) GetTimestamp() time.Time {
+	return ms.chainTime
+}
+
+func (ms *merkleState) SetTimestamp(tm time.Time) {
+	ms.chainTime = tm
+}
+
+func (ms *merkleState) GetLastAccepted() ids.ID {
+	return ms.lastAcceptedBlkID
+}
+
+func (ms *merkleState) SetLastAccepted(lastAccepted ids.ID) {
+	ms.lastAcceptedBlkID = lastAccepted
+}
+
+func (ms *merkleState) SetHeight(height uint64) {
+	ms.lastAcceptedHeight = height
+}
+
+func (ms *merkleState) GetCurrentSupply(subnetID ids.ID) (uint64, error) {
+	supply, ok := ms.supplies[subnetID]
+	if ok {
+		return supply, nil
+	}
+
+	return supply, fmt.Errorf("supply not stored: %w", ErrNotYetImplemented)
+}
+
+func (ms *merkleState) SetCurrentSupply(subnetID ids.ID, cs uint64) {
+	ms.supplies[subnetID] = cs
+}
+
+// SUBNETS Section
+func (ms *merkleState) GetSubnets() ([]*txs.Tx, error) {
+	if ms.subnets != nil {
+		return ms.subnets, nil
+	}
+
+	return nil, fmt.Errorf("subnets not stored: %w", ErrNotYetImplemented)
+}
+
+func (ms *merkleState) AddSubnet(createSubnetTx *txs.Tx) {
+	ms.subnets = append(ms.subnets, createSubnetTx)
+}
+
+func (ms *merkleState) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) {
+	if tx, exists := ms.transformedSubnets[subnetID]; exists {
+		return tx, nil
+	}
+
+	return nil, fmt.Errorf("transformed subnets not stored: %w", ErrNotYetImplemented)
+}
+
+func (ms *merkleState) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) {
+	transformSubnetTx := transformSubnetTxIntf.Unsigned.(*txs.TransformSubnetTx)
+	ms.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf
+}
+
+// CHAINS Section
+func (ms *merkleState) GetChains(subnetID ids.ID) ([]*txs.Tx, error) {
+	if chains, cached := ms.chains[subnetID]; cached {
+		return chains, nil
+	}
+
+	return nil, fmt.Errorf("chains not stored: %w", ErrNotYetImplemented)
+}
+
+func (ms *merkleState) AddChain(createChainTxIntf *txs.Tx) {
+	createChainTx := createChainTxIntf.Unsigned.(*txs.CreateChainTx)
+	subnetID := createChainTx.SubnetID
+
+	ms.chains[subnetID] = append(ms.chains[subnetID], createChainTxIntf)
+}
+
+// TXs Section
+func (ms *merkleState) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) {
+	if tx, exists := ms.txs[txID]; exists {
+		return tx.tx, tx.status, nil
+	}
+
+	return nil, status.Unknown, fmt.Errorf("txs not stored: %w", ErrNotYetImplemented)
+}
+
+func (ms *merkleState) AddTx(tx *txs.Tx, status status.Status) {
+	ms.txs[tx.ID()] = &txAndStatus{
+		tx:     tx,
+		status: status,
+	}
+}
+
+// BLOCKs Section
+func (ms *merkleState) GetStatelessBlock(blockID ids.ID) (blocks.Block, error) {
+	if blk, exists := ms.blocks[blockID]; exists {
+		return blk, nil
+	}
+
+	return nil, fmt.Errorf("blocks not stored: %w", ErrNotYetImplemented)
+}
+
+func (ms *merkleState) AddStatelessBlock(block blocks.Block) {
+	ms.blocks[block.ID()] = block
+}
+
+// UPTIMES
SECTION +func (*merkleState) GetUptime( + /*nodeID*/ ids.NodeID, + /*subnetID*/ ids.ID, +) (upDuration time.Duration, lastUpdated time.Time, err error) { + return 0, time.Time{}, fmt.Errorf("MerkleDB GetUptime: %w", ErrNotYetImplemented) +} + +func (*merkleState) SetUptime( + /*nodeID*/ ids.NodeID, + /*subnetID*/ ids.ID, + /*upDuration*/ time.Duration, + /*lastUpdated*/ time.Time, +) error { + return fmt.Errorf("MerkleDB SetUptime: %w", ErrNotYetImplemented) +} + +func (*merkleState) GetStartTime( + /*nodeID*/ ids.NodeID, + /*subnetID*/ ids.ID, +) (startTime time.Time, err error) { + return time.Time{}, fmt.Errorf("MerkleDB GetStartTime: %w", ErrNotYetImplemented) +} + +// VALIDATORS Section +func (*merkleState) ValidatorSet( /*subnetID*/ ids.ID /*vdrs*/, validators.Set) error { + return fmt.Errorf("MerkleDB ValidatorSet: %w", ErrNotYetImplemented) +} + +func (*merkleState) GetValidatorWeightDiffs( /*height*/ uint64 /*subnetID*/, ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) { + return nil, fmt.Errorf("MerkleDB GetValidatorWeightDiffs: %w", ErrNotYetImplemented) +} + +func (*merkleState) GetValidatorPublicKeyDiffs( /*height*/ uint64) (map[ids.NodeID]*bls.PublicKey, error) { + return nil, fmt.Errorf("MerkleDB GetValidatorPublicKeyDiffs: %w", ErrNotYetImplemented) +} + +// DB Operations +func (*merkleState) Abort() {} + +func (*merkleState) Commit() error { + return fmt.Errorf("MerkleDB Commit: %w", ErrNotYetImplemented) +} + +func (*merkleState) CommitBatch() (database.Batch, error) { + return nil, fmt.Errorf("MerkleDB CommitBatch: %w", ErrNotYetImplemented) +} + +func (*merkleState) Checksum() ids.ID { + return ids.Empty +} + +func (*merkleState) Close() error { + return fmt.Errorf("MerkleDB Close: %w", ErrNotYetImplemented) +} From be24571ce3137800d3892a3913077b1d2d0b5408 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 20 Jul 2023 15:42:52 +0200 Subject: [PATCH 052/132] wip: merkleDB pchain state --- vms/platformvm/state/merkle_state.go | 493 +++++++++++++++++++++++---- 1 file changed, 433 insertions(+), 60 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 62dbe380c30d..3c18daa59f1b 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -11,6 +11,8 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" @@ -18,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -33,17 +36,32 @@ const ( var ( _ State = (*merkleState)(nil) - ErrNotYetImplemented = errors.New("not yet implemented") + errNotYetImplemented = errors.New("not yet implemented") merkleStatePrefix = []byte{0x0} merkleBlockPrefix = []byte{0x1} + merkleTxPrefix = []byte{0x2} + + // merkle db sections + metadataSectionPrefix = []byte("m") + merkleChainTimeKey = append(metadataSectionPrefix, []byte("t")...) + merkleLastAcceptedBlkIDKey = append(metadataSectionPrefix, []byte("b")...) + merkleSuppliesPrefix = append(metadataSectionPrefix, []byte("s")...) 
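+
+	// Note: deriving these keys by appending to the shared
+	// metadataSectionPrefix slice is fragile: []byte("m") can carry spare
+	// capacity (with the gc runtime it typically does), in which case the
+	// three appends above all write into the same backing array and the
+	// derived keys silently alias one another. A safer sketch builds each
+	// key with an explicit allocation:
+	//
+	//	key := make([]byte, 0, len(metadataSectionPrefix)+1)
+	//	key = append(key, metadataSectionPrefix...)
+	//	key = append(key, 't')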
+ + permissionedSubnetSectionPrefix = []byte("s") + elasticSubnetSectionPrefix = []byte("e") + chainsSectionPrefix = []byte("c") ) -func NewMerkleState(rawDB database.Database) (Chain, error) { +func NewMerkleState( + rawDB database.Database, + metricsReg prometheus.Registerer, +) (Chain, error) { var ( baseDB = versiondb.New(rawDB) baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) blockDB = prefixdb.New(merkleBlockPrefix, baseDB) + txDB = prefixdb.New(merkleTxPrefix, baseDB) ) ctx := context.TODO() @@ -62,11 +80,57 @@ func NewMerkleState(rawDB database.Database) (Chain, error) { return nil, fmt.Errorf("failed creating merkleDB: %w", err) } + suppliesCache, err := metercacher.New[ids.ID, *uint64]( + "supply_cache", + metricsReg, + &cache.LRU[ids.ID, *uint64]{Size: chainCacheSize}, + ) + if err != nil { + return nil, err + } + + transformedSubnetCache, err := metercacher.New( + "transformed_subnet_cache", + metricsReg, + cache.NewSizedLRU[ids.ID, *txs.Tx](transformedSubnetTxCacheSize, txSize), + ) + if err != nil { + return nil, err + } + + chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( + "chain_cache", + metricsReg, + &cache.LRU[ids.ID, []*txs.Tx]{Size: chainCacheSize}, + ) + if err != nil { + return nil, err + } + + blockCache, err := metercacher.New[ids.ID, blocks.Block]( + "block_cache", + metricsReg, + cache.NewSizedLRU[ids.ID, blocks.Block](blockCacheSize, blockSize), + ) + if err != nil { + return nil, err + } + + txCache, err := metercacher.New( + "tx_cache", + metricsReg, + cache.NewSizedLRU[ids.ID, *txAndStatus](txCacheSize, txAndStatusSize), + ) + if err != nil { + return nil, err + } + res := &merkleState{ baseDB: baseDB, baseMerkleDB: baseMerkleDB, merkleDB: merkleDB, blockDB: blockDB, + txDB: txDB, currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), @@ -74,30 +138,32 @@ func NewMerkleState(rawDB database.Database) (Chain, error) { ordinaryUTXOs: make(map[ids.ID]*avax.UTXO), rewardUTXOs: make(map[ids.ID][]*avax.UTXO), - supplies: make(map[ids.ID]uint64), + supplies: make(map[ids.ID]uint64), + suppliesCache: suppliesCache, - subnets: make([]*txs.Tx, 0), - transformedSubnets: make(map[ids.ID]*txs.Tx), + addedPermissionedSubnets: make([]*txs.Tx, 0), + permissionedSubnetCache: make([]*txs.Tx, 0), + addedElasticSubnets: make(map[ids.ID]*txs.Tx), + elasticSubnetCache: transformedSubnetCache, - chains: make(map[ids.ID][]*txs.Tx), + addedChains: make(map[ids.ID][]*txs.Tx), + chainCache: chainCache, - txs: make(map[ids.ID]*txAndStatus), + addedTxs: make(map[ids.ID]*txAndStatus), + txCache: txCache, + + addedBlocks: make(map[ids.ID]blocks.Block), + blockCache: blockCache, } return res, nil } type merkleState struct { - baseDB database.Database + baseDB *versiondb.Database baseMerkleDB database.Database merkleDB merkledb.MerkleDB // meklelized state - blockDB database.Database // all the rest, prolly just blocks?? - - // TODO ABENEGIA: in sections below there should ofter be three elements - // in-memory element to track non-committed diffs - // a cache of the DB - // the DB - // For now the in-memory diff and cache are treated the same. 
I'll introduce - // as soon as there will be persistence/commit + blockDB database.Database + txDB database.Database // stakers section (missing Delegatee piece) // TODO: Consider moving delegatee to UTXOs section @@ -110,24 +176,30 @@ type merkleState struct { // Metadata section chainTime time.Time - supplies map[ids.ID]uint64 // map of subnetID -> current supply lastAcceptedBlkID ids.ID - lastAcceptedHeight uint64 + lastAcceptedHeight uint64 // Should this be written to state?? + supplies map[ids.ID]uint64 // map of subnetID -> current supply + suppliesCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database // Subnets section - subnets []*txs.Tx - transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx + addedPermissionedSubnets []*txs.Tx // added SubnetTxs, waiting to be committed + permissionedSubnetCache []*txs.Tx // nil if the subnets haven't been loaded + addedElasticSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx + elasticSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database // Chains section - chains map[ids.ID][]*txs.Tx // maps subnetID -> subnet's chains + addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet + chainCache cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx // Txs section - // FIND a way to reduce use of these. No use in verification of txs + // FIND a way to reduce use of these. No use in verification of addedTxs // a limited windows to support APIs - txs map[ids.ID]*txAndStatus + addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} + txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. If the entry is nil, it isn't in the database // Blocks section - blocks map[ids.ID]blocks.Block // map of blockID -> Block + addedBlocks map[ids.ID]blocks.Block // map of blockID -> Block + blockCache cache.Cacher[ids.ID, blocks.Block] // cache of blockID -> Block. 
If the entry is nil, it is not in the database } // STAKERS section @@ -188,11 +260,11 @@ func (ms *merkleState) GetPendingStakerIterator() (StakerIterator, error) { } func (*merkleState) GetDelegateeReward( /*subnetID*/ ids.ID /*vdrID*/, ids.NodeID) (amount uint64, err error) { - return 0, ErrNotYetImplemented + return 0, errNotYetImplemented } func (*merkleState) SetDelegateeReward( /*subnetID*/ ids.ID /*vdrID*/, ids.NodeID /*amount*/, uint64) error { - return ErrNotYetImplemented + return errNotYetImplemented } // UTXOs section @@ -203,11 +275,11 @@ func (ms *merkleState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { } return utxo, nil } - return nil, fmt.Errorf("utxos not stored: %w", ErrNotYetImplemented) + return nil, fmt.Errorf("utxos not stored: %w", errNotYetImplemented) } func (*merkleState) UTXOIDs( /*addr*/ []byte /*start*/, ids.ID /*limit*/, int) ([]ids.ID, error) { - return nil, fmt.Errorf("utxos iteration not yet implemented: %w", ErrNotYetImplemented) + return nil, fmt.Errorf("utxos iteration not yet implemented: %w", errNotYetImplemented) } func (ms *merkleState) AddUTXO(utxo *avax.UTXO) { @@ -223,7 +295,7 @@ func (ms *merkleState) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { return utxos, nil } - return nil, fmt.Errorf("reward utxos not stored: %w", ErrNotYetImplemented) + return nil, fmt.Errorf("reward utxos not stored: %w", errNotYetImplemented) } func (ms *merkleState) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { @@ -257,7 +329,7 @@ func (ms *merkleState) GetCurrentSupply(subnetID ids.ID) (uint64, error) { return supply, nil } - return supply, fmt.Errorf("supply not stored: %w", ErrNotYetImplemented) + return supply, fmt.Errorf("supply not stored: %w", errNotYetImplemented) } func (ms *merkleState) SetCurrentSupply(subnetID ids.ID, cs uint64) { @@ -266,57 +338,156 @@ func (ms *merkleState) SetCurrentSupply(subnetID ids.ID, cs uint64) { // SUBNETS Section func (ms *merkleState) GetSubnets() ([]*txs.Tx, error) { - if ms.subnets != nil { - return ms.subnets, nil + // Note: we want all subnets, so we don't look at addedSubnets + // which are only part of them + if ms.permissionedSubnetCache != nil { + return ms.permissionedSubnetCache, nil } - return nil, fmt.Errorf("subnets not stored: %w", ErrNotYetImplemented) + subnets := make([]*txs.Tx, 0) + subnetDBIt := ms.merkleDB.NewIteratorWithPrefix(permissionedSubnetSectionPrefix) + defer subnetDBIt.Release() + + for subnetDBIt.Next() { + subnetTxBytes := subnetDBIt.Value() + subnetTx, err := txs.Parse(txs.GenesisCodec, subnetTxBytes) + if err != nil { + return nil, err + } + subnets = append(subnets, subnetTx) + } + if err := subnetDBIt.Error(); err != nil { + return nil, err + } + subnets = append(subnets, ms.addedPermissionedSubnets...) 
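+	// Note: the rebuilt slice includes subnets added in memory but not yet
+	// committed. Since AddSubnet only appends to addedPermissionedSubnets,
+	// a subnet added after this cache is populated will be missing from
+	// later GetSubnets calls until the cache is invalidated or updated.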
+ ms.permissionedSubnetCache = subnets + return subnets, nil } func (ms *merkleState) AddSubnet(createSubnetTx *txs.Tx) { - ms.subnets = append(ms.subnets, createSubnetTx) + ms.addedPermissionedSubnets = append(ms.addedPermissionedSubnets, createSubnetTx) } func (ms *merkleState) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { - if tx, exists := ms.transformedSubnets[subnetID]; exists { + if tx, exists := ms.addedElasticSubnets[subnetID]; exists { + return tx, nil + } + + if tx, cached := ms.elasticSubnetCache.Get(subnetID); cached { + if tx == nil { + return nil, database.ErrNotFound + } return tx, nil } - return nil, fmt.Errorf("transformed subnets not stored: %w", ErrNotYetImplemented) + subnetIDKey := make([]byte, 0, len(elasticSubnetSectionPrefix)+len(subnetID[:])) + copy(subnetIDKey, merkleSuppliesPrefix) + subnetIDKey = append(subnetIDKey, subnetID[:]...) + + transformSubnetTxID, err := database.GetID(ms.merkleDB, subnetIDKey) + switch err { + case nil: + transformSubnetTx, _, err := ms.GetTx(transformSubnetTxID) + if err != nil { + return nil, err + } + ms.elasticSubnetCache.Put(subnetID, transformSubnetTx) + return transformSubnetTx, nil + + case database.ErrNotFound: + ms.elasticSubnetCache.Put(subnetID, nil) + return nil, database.ErrNotFound + + default: + return nil, err + } } func (ms *merkleState) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { transformSubnetTx := transformSubnetTxIntf.Unsigned.(*txs.TransformSubnetTx) - ms.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf + ms.addedElasticSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf } // CHAINS Section func (ms *merkleState) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { - if chains, cached := ms.chains[subnetID]; cached { + if chains, cached := ms.chainCache.Get(subnetID); cached { return chains, nil } - - return nil, fmt.Errorf("chains not stored: %w", ErrNotYetImplemented) + chains := make([]*txs.Tx, 0) + + prefix := make([]byte, 0, len(chainsSectionPrefix)+len(subnetID[:])) + copy(prefix, chainsSectionPrefix) + prefix = append(prefix, subnetID[:]...) + + chainDBIt := ms.merkleDB.NewIteratorWithPrefix(prefix) + defer chainDBIt.Release() + for chainDBIt.Next() { + chainTxBytes := chainDBIt.Value() + chainTx, err := txs.Parse(txs.GenesisCodec, chainTxBytes) + if err != nil { + return nil, err + } + chains = append(chains, chainTx) + } + if err := chainDBIt.Error(); err != nil { + return nil, err + } + chains = append(chains, ms.addedChains[subnetID]...) 
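+	// Same caching caveat as GetSubnets: AddChain only updates addedChains,
+	// so a chain added after this entry is cached will not appear in later
+	// GetChains calls for the subnet unless the cache entry is refreshed.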
+ ms.chainCache.Put(subnetID, chains) + return chains, nil } func (ms *merkleState) AddChain(createChainTxIntf *txs.Tx) { createChainTx := createChainTxIntf.Unsigned.(*txs.CreateChainTx) subnetID := createChainTx.SubnetID - ms.chains[subnetID] = append(ms.chains[subnetID], createChainTxIntf) + ms.addedChains[subnetID] = append(ms.addedChains[subnetID], createChainTxIntf) } // TXs Section func (ms *merkleState) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { - if tx, exists := ms.txs[txID]; exists { + if tx, exists := ms.addedTxs[txID]; exists { + return tx.tx, tx.status, nil + } + if tx, cached := ms.txCache.Get(txID); cached { + if tx == nil { + return nil, status.Unknown, database.ErrNotFound + } return tx.tx, tx.status, nil } - return nil, status.Unknown, fmt.Errorf("txs not stored: %w", ErrNotYetImplemented) + txBytes, err := ms.txDB.Get(txID[:]) + switch err { + case nil: + stx := txBytesAndStatus{} + if _, err := txs.GenesisCodec.Unmarshal(txBytes, &stx); err != nil { + return nil, status.Unknown, err + } + + tx, err := txs.Parse(txs.GenesisCodec, stx.Tx) + if err != nil { + return nil, status.Unknown, err + } + + ptx := &txAndStatus{ + tx: tx, + status: stx.Status, + } + + ms.txCache.Put(txID, ptx) + return ptx.tx, ptx.status, nil + + case database.ErrNotFound: + ms.txCache.Put(txID, nil) + return nil, status.Unknown, database.ErrNotFound + + default: + return nil, status.Unknown, err + } } func (ms *merkleState) AddTx(tx *txs.Tx, status status.Status) { - ms.txs[tx.ID()] = &txAndStatus{ + ms.addedTxs[tx.ID()] = &txAndStatus{ tx: tx, status: status, } @@ -324,15 +495,41 @@ func (ms *merkleState) AddTx(tx *txs.Tx, status status.Status) { // BLOCKs Section func (ms *merkleState) GetStatelessBlock(blockID ids.ID) (blocks.Block, error) { - if blk, exists := ms.blocks[blockID]; exists { + if blk, exists := ms.addedBlocks[blockID]; exists { + return blk, nil + } + + if blk, cached := ms.blockCache.Get(blockID); cached { + if blk == nil { + return nil, database.ErrNotFound + } + return blk, nil } - return nil, fmt.Errorf("blocks not stored: %w", ErrNotYetImplemented) + blkBytes, err := ms.blockDB.Get(blockID[:]) + switch err { + case nil: + // Note: stored blocks are verified, so it's safe to unmarshal them with GenesisCodec + blk, err := blocks.Parse(blocks.GenesisCodec, blkBytes) + if err != nil { + return nil, err + } + + ms.blockCache.Put(blockID, blk) + return blk, nil + + case database.ErrNotFound: + ms.blockCache.Put(blockID, nil) + return nil, database.ErrNotFound + + default: + return nil, err + } } func (ms *merkleState) AddStatelessBlock(block blocks.Block) { - ms.blocks[block.ID()] = block + ms.addedBlocks[block.ID()] = block } // UPTIMES SECTION @@ -340,7 +537,7 @@ func (*merkleState) GetUptime( /*nodeID*/ ids.NodeID, /*subnetID*/ ids.ID, ) (upDuration time.Duration, lastUpdated time.Time, err error) { - return 0, time.Time{}, fmt.Errorf("MerkleDB GetUptime: %w", ErrNotYetImplemented) + return 0, time.Time{}, fmt.Errorf("MerkleDB GetUptime: %w", errNotYetImplemented) } func (*merkleState) SetUptime( @@ -349,44 +546,220 @@ func (*merkleState) SetUptime( /*upDuration*/ time.Duration, /*lastUpdated*/ time.Time, ) error { - return fmt.Errorf("MerkleDB SetUptime: %w", ErrNotYetImplemented) + return fmt.Errorf("MerkleDB SetUptime: %w", errNotYetImplemented) } func (*merkleState) GetStartTime( /*nodeID*/ ids.NodeID, /*subnetID*/ ids.ID, ) (startTime time.Time, err error) { - return time.Time{}, fmt.Errorf("MerkleDB GetStartTime: %w", ErrNotYetImplemented) + return time.Time{}, 
fmt.Errorf("MerkleDB GetStartTime: %w", errNotYetImplemented) } // VALIDATORS Section func (*merkleState) ValidatorSet( /*subnetID*/ ids.ID /*vdrs*/, validators.Set) error { - return fmt.Errorf("MerkleDB ValidatorSet: %w", ErrNotYetImplemented) + return fmt.Errorf("MerkleDB ValidatorSet: %w", errNotYetImplemented) } func (*merkleState) GetValidatorWeightDiffs( /*height*/ uint64 /*subnetID*/, ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) { - return nil, fmt.Errorf("MerkleDB GetValidatorWeightDiffs: %w", ErrNotYetImplemented) + return nil, fmt.Errorf("MerkleDB GetValidatorWeightDiffs: %w", errNotYetImplemented) } func (*merkleState) GetValidatorPublicKeyDiffs( /*height*/ uint64) (map[ids.NodeID]*bls.PublicKey, error) { - return nil, fmt.Errorf("MerkleDB GetValidatorPublicKeyDiffs: %w", ErrNotYetImplemented) + return nil, fmt.Errorf("MerkleDB GetValidatorPublicKeyDiffs: %w", errNotYetImplemented) } // DB Operations -func (*merkleState) Abort() {} +func (ms *merkleState) Abort() { + ms.baseDB.Abort() +} -func (*merkleState) Commit() error { - return fmt.Errorf("MerkleDB Commit: %w", ErrNotYetImplemented) +func (ms *merkleState) Commit() error { + defer ms.Abort() + batch, err := ms.CommitBatch() + if err != nil { + return err + } + return batch.Write() } -func (*merkleState) CommitBatch() (database.Batch, error) { - return nil, fmt.Errorf("MerkleDB CommitBatch: %w", ErrNotYetImplemented) +func (ms *merkleState) CommitBatch() (database.Batch, error) { + // updateValidators is set to true here so that the validator manager is + // kept up to date with the last accepted state. + if err := ms.write(true /*=updateValidators*/, ms.lastAcceptedHeight); err != nil { + return nil, err + } + return ms.baseDB.CommitBatch() } func (*merkleState) Checksum() ids.ID { return ids.Empty } -func (*merkleState) Close() error { - return fmt.Errorf("MerkleDB Close: %w", ErrNotYetImplemented) +func (ms *merkleState) Close() error { + errs := wrappers.Errs{} + errs.Add( + ms.blockDB.Close(), + ms.merkleDB.Close(), + ms.baseMerkleDB.Close(), + ) + return errs.Err +} + +func (ms *merkleState) write( /*updateValidators*/ bool /*height*/, uint64) error { + errs := wrappers.Errs{} + errs.Add( + ms.writeMerkleState(), + ms.writeBlocks(), + ms.writeTXs(), + ) + return errs.Err +} + +func (ms *merkleState) writeMerkleState() error { + errs := wrappers.Errs{} + view, err := ms.merkleDB.NewView() + if err != nil { + return err + } + + ctx := context.TODO() + errs.Add( + ms.writeMetadata(view, ctx), + ms.writePermissionedSubnets(view, ctx), + ms.writeElasticSubnets(view, ctx), + ms.writeChains(view, ctx), + ) + if errs.Err != nil { + return err + } + + return view.CommitToDB(ctx) +} + +func (ms *merkleState) writeMetadata(view merkledb.TrieView, ctx context.Context) error { + encodedChainTime, err := ms.chainTime.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to encoding chainTime: %w", err) + } + if err := view.Insert(ctx, merkleChainTimeKey, encodedChainTime); err != nil { + return fmt.Errorf("failed to write chainTime: %w", err) + } + + if err := view.Insert(ctx, merkleLastAcceptedBlkIDKey, ms.lastAcceptedBlkID[:]); err != nil { + return fmt.Errorf("failed to write last accepted: %w", err) + } + + // lastAcceptedBlockHeight not persisted yet in merkleDB state. 
+ // TODO: Consider if it should be + + for subnetID, supply := range ms.supplies { + supply := supply + delete(ms.supplies, subnetID) + ms.suppliesCache.Put(subnetID, &supply) + + key := make([]byte, 0, len(merkleSuppliesPrefix)+len(subnetID[:])) + copy(key, merkleSuppliesPrefix) + key = append(key, subnetID[:]...) + if err := view.Insert(ctx, key, database.PackUInt64(supply)); err != nil { + return fmt.Errorf("failed to write subnet %v supply: %w", subnetID, err) + } + } + return nil +} + +func (ms *merkleState) writePermissionedSubnets(view merkledb.TrieView, ctx context.Context) error { + for _, subnetTx := range ms.addedPermissionedSubnets { + subnetID := subnetTx.ID() + + key := make([]byte, 0, len(permissionedSubnetSectionPrefix)+len(subnetID[:])) + copy(key, permissionedSubnetSectionPrefix) + key = append(key, subnetID[:]...) + + if err := view.Insert(ctx, key, subnetTx.Bytes()); err != nil { + return fmt.Errorf("failed to write subnetTx: %w", err) + } + } + ms.addedPermissionedSubnets = nil + return nil +} + +func (ms *merkleState) writeElasticSubnets(view merkledb.TrieView, ctx context.Context) error { + for _, subnetTx := range ms.addedElasticSubnets { + subnetID := subnetTx.ID() + + key := make([]byte, 0, len(elasticSubnetSectionPrefix)+len(subnetID[:])) + copy(key, elasticSubnetSectionPrefix) + key = append(key, subnetID[:]...) + + if err := view.Insert(ctx, key, subnetTx.Bytes()); err != nil { + return fmt.Errorf("failed to write subnetTx: %w", err) + } + } + ms.addedElasticSubnets = nil + return nil +} + +func (ms *merkleState) writeChains(view merkledb.TrieView, ctx context.Context) error { + for subnetID, chains := range ms.addedChains { + for _, chainTx := range chains { + chainID := chainTx.ID() + + key := make([]byte, 0, len(chainsSectionPrefix)+len(subnetID[:])) + copy(key, chainsSectionPrefix) + key = append(key, subnetID[:]...) + key = append(key, chainID[:]...) + + if err := view.Insert(ctx, key, chainTx.Bytes()); err != nil { + return fmt.Errorf("failed to write chain: %w", err) + } + } + delete(ms.addedChains, subnetID) + } + return nil +} + +func (ms *merkleState) writeBlocks() error { + for blkID, blk := range ms.addedBlocks { + blkID := blkID + + delete(ms.addedBlocks, blkID) + // Note: Evict is used rather than Put here because blk may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. + ms.blockCache.Evict(blkID) + + if err := ms.blockDB.Put(blkID[:], blk.Bytes()); err != nil { + return fmt.Errorf("failed to write block %s: %w", blkID, err) + } + } + return nil +} + +func (ms *merkleState) writeTXs() error { + for txID, txStatus := range ms.addedTxs { + txID := txID + + stx := txBytesAndStatus{ + Tx: txStatus.tx.Bytes(), + Status: txStatus.status, + } + + // Note that we're serializing a [txBytesAndStatus] here, not a + // *txs.Tx, so we don't use [txs.Codec]. + txBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stx) + if err != nil { + return fmt.Errorf("failed to serialize tx: %w", err) + } + + delete(ms.addedTxs, txID) + // Note: Evict is used rather than Put here because stx may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. 
+ ms.txCache.Evict(txID) + if err := ms.txDB.Put(txID[:], txBytes); err != nil { + return fmt.Errorf("failed to add tx: %w", err) + } + } + return nil } From e2fbc07abebe06c805f39be16cbe3cf1f00804f0 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 20 Jul 2023 21:16:44 +0200 Subject: [PATCH 053/132] wip: merkleDB pchain state --- vms/platformvm/state/merkle_state.go | 309 +++++++++++++++++++++++---- 1 file changed, 267 insertions(+), 42 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 3c18daa59f1b..d518ae5564aa 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -31,6 +31,8 @@ import ( const ( HistoryLength = int(256) // from HyperSDK NodeCacheSize = int(65_536) // from HyperSDK + + utxoCacheSize = 8192 // from avax/utxo_state.go ) var ( @@ -38,19 +40,22 @@ var ( errNotYetImplemented = errors.New("not yet implemented") - merkleStatePrefix = []byte{0x0} - merkleBlockPrefix = []byte{0x1} - merkleTxPrefix = []byte{0x2} + merkleStatePrefix = []byte{0x0} + merkleBlockPrefix = []byte{0x1} + merkleTxPrefix = []byte{0x2} + merkleIndexUTXOsPrefix = []byte{0x3} // to serve UTXOIDs(addr) // merkle db sections - metadataSectionPrefix = []byte("m") - merkleChainTimeKey = append(metadataSectionPrefix, []byte("t")...) - merkleLastAcceptedBlkIDKey = append(metadataSectionPrefix, []byte("b")...) - merkleSuppliesPrefix = append(metadataSectionPrefix, []byte("s")...) - - permissionedSubnetSectionPrefix = []byte("s") - elasticSubnetSectionPrefix = []byte("e") - chainsSectionPrefix = []byte("c") + metadataSectionPrefix = []byte{0x0} + merkleChainTimeKey = append(metadataSectionPrefix, []byte{0x0}...) + merkleLastAcceptedBlkIDKey = append(metadataSectionPrefix, []byte{0x1}...) + merkleSuppliesPrefix = append(metadataSectionPrefix, []byte{0x2}...) 
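+
+	// Intended metadata layout inside merkleDB (first byte selects the
+	// section, the second the entry):
+	//   {0x0, 0x0}            -> chain time
+	//   {0x0, 0x1}            -> last accepted block ID
+	//   {0x0, 0x2} + subnetID -> current supply of the subnet
+	// These section prefixes are internal to merkleDB and unrelated to the
+	// one-byte prefixes (merkleStatePrefix etc.) that partition baseDB.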
+ + permissionedSubnetSectionPrefix = []byte{0x1} + elasticSubnetSectionPrefix = []byte{0x2} + chainsSectionPrefix = []byte{0x3} + utxosSectionPrefix = []byte{0x4} + rewardUtxosSectionPrefix = []byte{0x5} ) func NewMerkleState( @@ -58,10 +63,11 @@ func NewMerkleState( metricsReg prometheus.Registerer, ) (Chain, error) { var ( - baseDB = versiondb.New(rawDB) - baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) - blockDB = prefixdb.New(merkleBlockPrefix, baseDB) - txDB = prefixdb.New(merkleTxPrefix, baseDB) + baseDB = versiondb.New(rawDB) + baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) + blockDB = prefixdb.New(merkleBlockPrefix, baseDB) + txDB = prefixdb.New(merkleTxPrefix, baseDB) + indexedUTXOsDB = prefixdb.New(merkleIndexUTXOsPrefix, baseDB) ) ctx := context.TODO() @@ -80,6 +86,15 @@ func NewMerkleState( return nil, fmt.Errorf("failed creating merkleDB: %w", err) } + rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( + "reward_utxos_cache", + metricsReg, + &cache.LRU[ids.ID, []*avax.UTXO]{Size: rewardUTXOsCacheSize}, + ) + if err != nil { + return nil, err + } + suppliesCache, err := metercacher.New[ids.ID, *uint64]( "supply_cache", metricsReg, @@ -126,17 +141,20 @@ func NewMerkleState( } res := &merkleState{ - baseDB: baseDB, - baseMerkleDB: baseMerkleDB, - merkleDB: merkleDB, - blockDB: blockDB, - txDB: txDB, + baseDB: baseDB, + baseMerkleDB: baseMerkleDB, + merkleDB: merkleDB, + blockDB: blockDB, + txDB: txDB, + indexedUTXOsDB: indexedUTXOsDB, currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), - ordinaryUTXOs: make(map[ids.ID]*avax.UTXO), - rewardUTXOs: make(map[ids.ID][]*avax.UTXO), + modifiedUTXOs: make(map[ids.ID]*avax.UTXO), + utxoCache: &cache.LRU[ids.ID, *avax.UTXO]{Size: utxoCacheSize}, + addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), + rewardUTXOsCache: rewardUTXOsCache, supplies: make(map[ids.ID]uint64), suppliesCache: suppliesCache, @@ -159,11 +177,12 @@ func NewMerkleState( } type merkleState struct { - baseDB *versiondb.Database - baseMerkleDB database.Database - merkleDB merkledb.MerkleDB // meklelized state - blockDB database.Database - txDB database.Database + baseDB *versiondb.Database + baseMerkleDB database.Database + merkleDB merkledb.MerkleDB // meklelized state + blockDB database.Database + txDB database.Database + indexedUTXOsDB database.Database // stakers section (missing Delegatee piece) // TODO: Consider moving delegatee to UTXOs section @@ -171,8 +190,11 @@ type merkleState struct { pendingStakers *baseStakers // UTXOs section - ordinaryUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO - rewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO + modifiedUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO + utxoCache cache.Cacher[ids.ID, *avax.UTXO] // UTXO ID -> *UTXO. 
If the *UTXO is nil the UTXO doesn't exist + + addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO + rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO // Metadata section chainTime time.Time @@ -269,37 +291,107 @@ func (*merkleState) SetDelegateeReward( /*subnetID*/ ids.ID /*vdrID*/, ids.NodeI // UTXOs section func (ms *merkleState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { - if utxo, exists := ms.ordinaryUTXOs[utxoID]; exists { + if utxo, exists := ms.modifiedUTXOs[utxoID]; exists { + if utxo == nil { + return nil, database.ErrNotFound + } + return utxo, nil + } + if utxo, found := ms.utxoCache.Get(utxoID); found { if utxo == nil { return nil, database.ErrNotFound } return utxo, nil } - return nil, fmt.Errorf("utxos not stored: %w", errNotYetImplemented) + + key := make([]byte, 0, len(utxosSectionPrefix)+len(utxoID)) + copy(key, utxosSectionPrefix) + key = append(key, utxoID[:]...) + + switch bytes, err := ms.merkleDB.Get(key); err { + case nil: + utxo := &avax.UTXO{} + if _, err := txs.GenesisCodec.Unmarshal(bytes, utxo); err != nil { + return nil, err + } + ms.utxoCache.Put(utxoID, utxo) + return utxo, nil + + case database.ErrNotFound: + ms.utxoCache.Put(utxoID, nil) + return nil, database.ErrNotFound + + default: + return nil, err + } } -func (*merkleState) UTXOIDs( /*addr*/ []byte /*start*/, ids.ID /*limit*/, int) ([]ids.ID, error) { - return nil, fmt.Errorf("utxos iteration not yet implemented: %w", errNotYetImplemented) +func (ms *merkleState) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { + startKey := make([]byte, 0, len(addr)+len(start)) + copy(startKey, addr) + startKey = append(startKey, start[:]...) + + iter := ms.indexedUTXOsDB.NewIteratorWithStart(startKey) + defer iter.Release() + + utxoIDs := []ids.ID(nil) + for len(utxoIDs) < limit && iter.Next() { + utxoID, err := ids.ToID(iter.Key()) + if err != nil { + return nil, err + } + if utxoID == start { + continue + } + + start = ids.Empty + utxoIDs = append(utxoIDs, utxoID) + } + return utxoIDs, iter.Error() } func (ms *merkleState) AddUTXO(utxo *avax.UTXO) { - ms.ordinaryUTXOs[utxo.InputID()] = utxo + ms.modifiedUTXOs[utxo.InputID()] = utxo } func (ms *merkleState) DeleteUTXO(utxoID ids.ID) { - ms.ordinaryUTXOs[utxoID] = nil + ms.modifiedUTXOs[utxoID] = nil } func (ms *merkleState) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { - if utxos, exists := ms.rewardUTXOs[txID]; exists { + if utxos, exists := ms.addedRewardUTXOs[txID]; exists { + return utxos, nil + } + if utxos, exists := ms.rewardUTXOsCache.Get(txID); exists { return utxos, nil } - return nil, fmt.Errorf("reward utxos not stored: %w", errNotYetImplemented) + utxos := make([]*avax.UTXO, 0) + + prefix := make([]byte, 0, len(rewardUtxosSectionPrefix)+len(txID)) + copy(prefix, rewardUtxosSectionPrefix) + prefix = append(prefix, txID[:]...) 
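+	// Careful: copy into a zero-length slice is a no-op (copy moves
+	// min(len(dst), len(src)) bytes), so the section prefix above is
+	// silently dropped and prefix holds only the raw txID bytes. GetUTXO
+	// and UTXOIDs build their keys the same way. One correct form:
+	//
+	//	prefix := make([]byte, 0, len(rewardUtxosSectionPrefix)+len(txID))
+	//	prefix = append(prefix, rewardUtxosSectionPrefix...)
+	//	prefix = append(prefix, txID[:]...)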
+ + it := ms.merkleDB.NewIteratorWithPrefix(prefix) + defer it.Release() + + for it.Next() { + utxo := &avax.UTXO{} + if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil { + return nil, err + } + utxos = append(utxos, utxo) + } + if err := it.Error(); err != nil { + return nil, err + } + + ms.rewardUTXOsCache.Put(txID, utxos) + return utxos, nil } func (ms *merkleState) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { - ms.rewardUTXOs[txID] = append(ms.rewardUTXOs[txID], utxo) + ms.addedRewardUTXOs[txID] = append(ms.addedRewardUTXOs[txID], utxo) } // METADATA Section @@ -328,8 +420,34 @@ func (ms *merkleState) GetCurrentSupply(subnetID ids.ID) (uint64, error) { if ok { return supply, nil } + cachedSupply, ok := ms.suppliesCache.Get(subnetID) + if ok { + if cachedSupply == nil { + return 0, database.ErrNotFound + } + return *cachedSupply, nil + } + + key := make([]byte, 0, len(merkleSuppliesPrefix)+len(subnetID[:])) + copy(key, merkleSuppliesPrefix) + key = append(key, subnetID[:]...) - return supply, fmt.Errorf("supply not stored: %w", errNotYetImplemented) + switch supplyBytes, err := ms.merkleDB.Get(key); err { + case nil: + supply, err := database.ParseUInt64(supplyBytes) + if err != nil { + return 0, fmt.Errorf("failed parsing supply: %w", err) + } + ms.suppliesCache.Put(subnetID, &supply) + return supply, nil + + case database.ErrNotFound: + ms.suppliesCache.Put(subnetID, nil) + return 0, database.ErrNotFound + + default: + return 0, err + } } func (ms *merkleState) SetCurrentSupply(subnetID ids.ID, cs uint64) { @@ -629,6 +747,8 @@ func (ms *merkleState) writeMerkleState() error { ms.writePermissionedSubnets(view, ctx), ms.writeElasticSubnets(view, ctx), ms.writeChains(view, ctx), + ms.writeUTXOs(view, ctx), + ms.writeRewardUTXOs(view, ctx), ) if errs.Err != nil { return err @@ -702,12 +822,15 @@ func (ms *merkleState) writeElasticSubnets(view merkledb.TrieView, ctx context.C func (ms *merkleState) writeChains(view merkledb.TrieView, ctx context.Context) error { for subnetID, chains := range ms.addedChains { + prefixKey := make([]byte, 0, len(chainsSectionPrefix)+len(subnetID[:])) + copy(prefixKey, chainsSectionPrefix) + prefixKey = append(prefixKey, subnetID[:]...) + for _, chainTx := range chains { chainID := chainTx.ID() - key := make([]byte, 0, len(chainsSectionPrefix)+len(subnetID[:])) - copy(key, chainsSectionPrefix) - key = append(key, subnetID[:]...) + key := make([]byte, 0, len(prefixKey)+len(chainID)) + copy(key, prefixKey) key = append(key, chainID[:]...) if err := view.Insert(ctx, key, chainTx.Bytes()); err != nil { @@ -719,6 +842,82 @@ func (ms *merkleState) writeChains(view merkledb.TrieView, ctx context.Context) return nil } +func (ms *merkleState) writeUTXOs(view merkledb.TrieView, ctx context.Context) error { + for utxoID, utxo := range ms.modifiedUTXOs { + delete(ms.modifiedUTXOs, utxoID) + + key := make([]byte, 0, len(utxosSectionPrefix)+len(utxoID)) + copy(key, utxosSectionPrefix) + key = append(key, utxoID[:]...) 
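+		// Two caveats in the delete branch below: the UTXO returned by
+		// GetUTXO is discarded, yet the nil utxo is then handed to
+		// writeUTXOsIndex, which dereferences it (utxo.Out) and would
+		// panic; the fetched UTXO should be passed instead. And the
+		// database.ErrNotFound case returns nil outright, leaving the
+		// remaining modified UTXOs unwritten in this view; it should
+		// continue with the next entry.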
+ + if utxo == nil { // delete the UTXO + switch _, err := ms.GetUTXO(utxoID); err { + case nil: + ms.utxoCache.Put(utxoID, nil) + if err := view.Remove(ctx, key); err != nil { + return err + } + + // store the index + if err := ms.writeUTXOsIndex(utxo, false /*insertUtxo*/); err != nil { + return err + } + + case database.ErrNotFound: + return nil + + default: + return err + } + continue + } + + // insert the UTXO + utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + if err != nil { + return err + } + if err := view.Insert(ctx, key, utxoBytes); err != nil { + return err + } + + // store the index + if err := ms.writeUTXOsIndex(utxo, true /*insertUtxo*/); err != nil { + return err + } + } + return nil +} + +func (ms *merkleState) writeRewardUTXOs(view merkledb.TrieView, ctx context.Context) error { + for txID, utxos := range ms.addedRewardUTXOs { + delete(ms.addedRewardUTXOs, txID) + ms.rewardUTXOsCache.Put(txID, utxos) + + prefix := make([]byte, 0, len(rewardUtxosSectionPrefix)+len(txID)) + copy(prefix, rewardUtxosSectionPrefix) + prefix = append(prefix, txID[:]...) + + for _, utxo := range utxos { + utxoID := utxo.InputID() + + key := make([]byte, 0, len(prefix)+len(utxoID)) + copy(key, prefix) + key = append(key, utxoID[:]...) + + utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + if err != nil { + return fmt.Errorf("failed to serialize reward UTXO: %w", err) + } + + if err := view.Insert(ctx, key, utxoBytes); err != nil { + return fmt.Errorf("failed to add reward UTXO: %w", err) + } + } + } + return nil +} + func (ms *merkleState) writeBlocks() error { for blkID, blk := range ms.addedBlocks { blkID := blkID @@ -763,3 +962,29 @@ func (ms *merkleState) writeTXs() error { } return nil } + +func (ms *merkleState) writeUTXOsIndex(utxo *avax.UTXO, insertUtxo bool) error { + utxoID := utxo.InputID() + addressable, ok := utxo.Out.(avax.Addressable) + if !ok { + return nil + } + addresses := addressable.Addresses() + + for _, addr := range addresses { + key := make([]byte, 0, len(addr)+len(utxoID)) + copy(key, addr) + key = append(key, utxoID[:]...) 
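+		// Same zero-length copy caveat as the read path: the address bytes
+		// are never copied in, so every index key collapses to the bare
+		// utxoID and UTXOIDs cannot actually filter by address. (With the
+		// intended addr+utxoID keys, UTXOIDs would also need to strip the
+		// address prefix before calling ids.ToID on the iterator key.)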
+ + if insertUtxo { + if err := ms.indexedUTXOsDB.Put(key, nil); err != nil { + return err + } + } else { + if err := ms.indexedUTXOsDB.Delete(key); err != nil { + return err + } + } + } + return nil +} From 6642d176914734edfea62e63b505baa883deb47b Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 20 Jul 2023 23:11:32 +0200 Subject: [PATCH 054/132] wip: merkleDB pchain state --- vms/platformvm/state/merkle_state.go | 146 +++++++++++++++++++++------ 1 file changed, 114 insertions(+), 32 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index d518ae5564aa..ae5e4dc2eb0c 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -20,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" @@ -44,6 +45,7 @@ var ( merkleBlockPrefix = []byte{0x1} merkleTxPrefix = []byte{0x2} merkleIndexUTXOsPrefix = []byte{0x3} // to serve UTXOIDs(addr) + merkleUptimesPrefix = []byte{0x4} // merkle db sections metadataSectionPrefix = []byte{0x0} @@ -68,6 +70,7 @@ func NewMerkleState( blockDB = prefixdb.New(merkleBlockPrefix, baseDB) txDB = prefixdb.New(merkleTxPrefix, baseDB) indexedUTXOsDB = prefixdb.New(merkleIndexUTXOsPrefix, baseDB) + localUptimesDB = prefixdb.New(merkleUptimesPrefix, baseDB) ) ctx := context.TODO() @@ -141,12 +144,9 @@ func NewMerkleState( } res := &merkleState{ - baseDB: baseDB, - baseMerkleDB: baseMerkleDB, - merkleDB: merkleDB, - blockDB: blockDB, - txDB: txDB, - indexedUTXOsDB: indexedUTXOsDB, + baseDB: baseDB, + baseMerkleDB: baseMerkleDB, + merkleDB: merkleDB, currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), @@ -169,20 +169,25 @@ func NewMerkleState( addedTxs: make(map[ids.ID]*txAndStatus), txCache: txCache, + txDB: txDB, addedBlocks: make(map[ids.ID]blocks.Block), blockCache: blockCache, + blockDB: blockDB, + + indexedUTXOsDB: indexedUTXOsDB, + + localUptimesCache: make(map[ids.NodeID]map[ids.ID]*uptimes), + modifiedLocalUptimes: make(map[ids.NodeID]set.Set[ids.ID]), + localUptimesDB: localUptimesDB, } return res, nil } type merkleState struct { - baseDB *versiondb.Database - baseMerkleDB database.Database - merkleDB merkledb.MerkleDB // meklelized state - blockDB database.Database - txDB database.Database - indexedUTXOsDB database.Database + baseDB *versiondb.Database + baseMerkleDB database.Database + merkleDB merkledb.MerkleDB // meklelized state // stakers section (missing Delegatee piece) // TODO: Consider moving delegatee to UTXOs section @@ -218,10 +223,26 @@ type merkleState struct { // a limited windows to support APIs addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. If the entry is nil, it isn't in the database + txDB database.Database // Blocks section addedBlocks map[ids.ID]blocks.Block // map of blockID -> Block blockCache cache.Cacher[ids.ID, blocks.Block] // cache of blockID -> Block. 
If the entry is nil, it is not in the database + blockDB database.Database + + indexedUTXOsDB database.Database + + localUptimesCache map[ids.NodeID]map[ids.ID]*uptimes // vdrID -> subnetID -> metadata + modifiedLocalUptimes map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs + localUptimesDB database.Database +} + +type uptimes struct { + Duration time.Duration `serialize:"true"` + LastUpdated uint64 `serialize:"true"` // Unix time in seconds + + // txID ids.ID // TODO ABENEGIA: is it needed by delegators and not validators? + lastUpdated time.Time } // STAKERS section @@ -651,32 +672,66 @@ func (ms *merkleState) AddStatelessBlock(block blocks.Block) { } // UPTIMES SECTION -func (*merkleState) GetUptime( - /*nodeID*/ ids.NodeID, - /*subnetID*/ ids.ID, -) (upDuration time.Duration, lastUpdated time.Time, err error) { - return 0, time.Time{}, fmt.Errorf("MerkleDB GetUptime: %w", errNotYetImplemented) +func (ms *merkleState) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) { + nodeUptimes, exists := ms.localUptimesCache[vdrID] + if !exists { + return 0, time.Time{}, database.ErrNotFound + } + uptime, exists := nodeUptimes[subnetID] + if !exists { + return 0, time.Time{}, database.ErrNotFound + } + + return uptime.Duration, uptime.lastUpdated, nil } -func (*merkleState) SetUptime( - /*nodeID*/ ids.NodeID, - /*subnetID*/ ids.ID, - /*upDuration*/ time.Duration, - /*lastUpdated*/ time.Time, -) error { - return fmt.Errorf("MerkleDB SetUptime: %w", errNotYetImplemented) +func (ms *merkleState) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { + nodeUptimes, exists := ms.localUptimesCache[vdrID] + if !exists { + nodeUptimes = map[ids.ID]*uptimes{} + ms.localUptimesCache[vdrID] = nodeUptimes + } + + nodeUptimes[subnetID].Duration = upDuration + nodeUptimes[subnetID].lastUpdated = lastUpdated + + // track diff + updatedNodeUptimes, ok := ms.modifiedLocalUptimes[vdrID] + if !ok { + updatedNodeUptimes = set.Set[ids.ID]{} + ms.modifiedLocalUptimes[vdrID] = updatedNodeUptimes + } + updatedNodeUptimes.Add(subnetID) + return nil } -func (*merkleState) GetStartTime( - /*nodeID*/ ids.NodeID, - /*subnetID*/ ids.ID, -) (startTime time.Time, err error) { - return time.Time{}, fmt.Errorf("MerkleDB GetStartTime: %w", errNotYetImplemented) +func (ms *merkleState) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { + staker, err := ms.currentStakers.GetValidator(subnetID, nodeID) + if err != nil { + return time.Time{}, err + } + return staker.StartTime, nil } // VALIDATORS Section -func (*merkleState) ValidatorSet( /*subnetID*/ ids.ID /*vdrs*/, validators.Set) error { - return fmt.Errorf("MerkleDB ValidatorSet: %w", errNotYetImplemented) +func (ms *merkleState) ValidatorSet(subnetID ids.ID, vdrs validators.Set) error { + for nodeID, validator := range ms.currentStakers.validators[subnetID] { + staker := validator.validator + if err := vdrs.Add(nodeID, staker.PublicKey, staker.TxID, staker.Weight); err != nil { + return err + } + + delegatorIterator := NewTreeIterator(validator.delegators) + for delegatorIterator.Next() { + staker := delegatorIterator.Value() + if err := vdrs.AddWeight(nodeID, staker.Weight); err != nil { + delegatorIterator.Release() + return err + } + } + delegatorIterator.Release() + } + return nil } func (*merkleState) GetValidatorWeightDiffs( /*height*/ uint64 /*subnetID*/, ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) { @@ -704,7 +759,7 @@ func (ms 
*merkleState) Commit() error { func (ms *merkleState) CommitBatch() (database.Batch, error) { // updateValidators is set to true here so that the validator manager is // kept up to date with the last accepted state. - if err := ms.write(true /*=updateValidators*/, ms.lastAcceptedHeight); err != nil { + if err := ms.write(true /*updateValidators*/, ms.lastAcceptedHeight); err != nil { return nil, err } return ms.baseDB.CommitBatch() @@ -717,6 +772,9 @@ func (*merkleState) Checksum() ids.ID { func (ms *merkleState) Close() error { errs := wrappers.Errs{} errs.Add( + ms.localUptimesDB.Close(), + ms.indexedUTXOsDB.Close(), + ms.txDB.Close(), ms.blockDB.Close(), ms.merkleDB.Close(), ms.baseMerkleDB.Close(), @@ -730,6 +788,7 @@ func (ms *merkleState) write( /*updateValidators*/ bool /*height*/, uint64) erro ms.writeMerkleState(), ms.writeBlocks(), ms.writeTXs(), + ms.writelocalUptimes(), ) return errs.Err } @@ -988,3 +1047,26 @@ func (ms *merkleState) writeUTXOsIndex(utxo *avax.UTXO, insertUtxo bool) error { } return nil } + +func (ms *merkleState) writelocalUptimes() error { + for vdrID, updatedSubnets := range ms.modifiedLocalUptimes { + for subnetID := range updatedSubnets { + key := make([]byte, 0, len(vdrID)+len(subnetID)) + copy(key, vdrID[:]) + key = append(key, subnetID[:]...) + + uptimes := ms.localUptimesCache[vdrID][subnetID] + uptimes.LastUpdated = uint64(uptimes.lastUpdated.Unix()) + uptimeBytes, err := txs.GenesisCodec.Marshal(txs.Version, uptimes) + if err != nil { + return err + } + + if err := ms.localUptimesDB.Put(key, uptimeBytes); err != nil { + return fmt.Errorf("failed to add local uptimes: %w", err) + } + } + delete(ms.modifiedLocalUptimes, vdrID) + } + return nil +} From 8c37259553ae254a6666823bb22fdc022d16e6bc Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 21 Jul 2023 15:14:03 +0200 Subject: [PATCH 055/132] wip: scaffolding and cleanup --- vms/platformvm/state/merkle_state.go | 244 +++++++++++++------ vms/platformvm/state/merkle_state_helpers.go | 93 +++++++ 2 files changed, 265 insertions(+), 72 deletions(-) create mode 100644 vms/platformvm/state/merkle_state_helpers.go diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index ae5e4dc2eb0c..cae1f2a9f24c 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -58,6 +58,8 @@ var ( chainsSectionPrefix = []byte{0x3} utxosSectionPrefix = []byte{0x4} rewardUtxosSectionPrefix = []byte{0x5} + currentStakersSectionPrefix = []byte{0x6} + pendingStakersSectionPrefix = []byte{0x7} ) func NewMerkleState( @@ -325,9 +327,7 @@ func (ms *merkleState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { return utxo, nil } - key := make([]byte, 0, len(utxosSectionPrefix)+len(utxoID)) - copy(key, utxosSectionPrefix) - key = append(key, utxoID[:]...) + key := merkleUtxoIDKey(utxoID) switch bytes, err := ms.merkleDB.Get(key); err { case nil: @@ -348,11 +348,9 @@ func (ms *merkleState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { } func (ms *merkleState) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { - startKey := make([]byte, 0, len(addr)+len(start)) - copy(startKey, addr) - startKey = append(startKey, start[:]...) 
+ key := merkleUtxoIndexKey(addr, start) - iter := ms.indexedUTXOsDB.NewIteratorWithStart(startKey) + iter := ms.indexedUTXOsDB.NewIteratorWithStart(key) defer iter.Release() utxoIDs := []ids.ID(nil) @@ -389,9 +387,7 @@ func (ms *merkleState) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { utxos := make([]*avax.UTXO, 0) - prefix := make([]byte, 0, len(rewardUtxosSectionPrefix)+len(txID)) - copy(prefix, rewardUtxosSectionPrefix) - prefix = append(prefix, txID[:]...) + prefix := merkleRewardUtxosIDPrefix(txID) it := ms.merkleDB.NewIteratorWithPrefix(prefix) defer it.Release() @@ -449,9 +445,7 @@ func (ms *merkleState) GetCurrentSupply(subnetID ids.ID) (uint64, error) { return *cachedSupply, nil } - key := make([]byte, 0, len(merkleSuppliesPrefix)+len(subnetID[:])) - copy(key, merkleSuppliesPrefix) - key = append(key, subnetID[:]...) + key := merkleSuppliesKey(subnetID) switch supplyBytes, err := ms.merkleDB.Get(key); err { case nil: @@ -519,11 +513,9 @@ func (ms *merkleState) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) return tx, nil } - subnetIDKey := make([]byte, 0, len(elasticSubnetSectionPrefix)+len(subnetID[:])) - copy(subnetIDKey, merkleSuppliesPrefix) - subnetIDKey = append(subnetIDKey, subnetID[:]...) + key := merkleElasticSubnetKey(subnetID) - transformSubnetTxID, err := database.GetID(ms.merkleDB, subnetIDKey) + transformSubnetTxID, err := database.GetID(ms.merkleDB, key) switch err { case nil: transformSubnetTx, _, err := ms.GetTx(transformSubnetTxID) @@ -554,9 +546,7 @@ func (ms *merkleState) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { } chains := make([]*txs.Tx, 0) - prefix := make([]byte, 0, len(chainsSectionPrefix)+len(subnetID[:])) - copy(prefix, chainsSectionPrefix) - prefix = append(prefix, subnetID[:]...) + prefix := merkleChainPrefix(subnetID) chainDBIt := ms.merkleDB.NewIteratorWithPrefix(prefix) defer chainDBIt.Release() @@ -783,17 +773,135 @@ func (ms *merkleState) Close() error { } func (ms *merkleState) write( /*updateValidators*/ bool /*height*/, uint64) error { + currentData, err := ms.processCurrentStakers() + if err != nil { + return err + } + pendingData, err := ms.processPendingStakers() + if err != nil { + return err + } + errs := wrappers.Errs{} errs.Add( - ms.writeMerkleState(), + ms.writeMerkleState(currentData, pendingData), ms.writeBlocks(), ms.writeTXs(), - ms.writelocalUptimes(), + ms.writeLocalUptimes(), ) return errs.Err } -func (ms *merkleState) writeMerkleState() error { +type stakersData struct { + TxBytes []byte `serialize:"true"` + IsCurrent bool `serialize:"true"` + PotentialReward uint64 `serialize:"true"` +} + +func (ms *merkleState) processCurrentStakers() (map[ids.ID]*stakersData, error) { + output := make(map[ids.ID]*stakersData) + for subnetID, subnetValidatorDiffs := range ms.currentStakers.validatorDiffs { + delete(ms.currentStakers.validatorDiffs, subnetID) + for _, validatorDiff := range subnetValidatorDiffs { + switch validatorDiff.validatorStatus { + case added: + var ( + txID = validatorDiff.validator.TxID + potentialReward = validatorDiff.validator.PotentialReward + ) + tx, _, err := ms.GetTx(txID) + if err != nil { + return nil, fmt.Errorf("failed loading current validator tx, %w", err) + } + output[txID] = &stakersData{ + TxBytes: tx.Bytes(), + IsCurrent: false, + PotentialReward: potentialReward, + } + case deleted: + txID := validatorDiff.validator.TxID + output[txID] = &stakersData{ + TxBytes: nil, + } + } + + addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) + defer 
addedDelegatorIterator.Release() + for addedDelegatorIterator.Next() { + staker := addedDelegatorIterator.Value() + tx, _, err := ms.GetTx(staker.TxID) + if err != nil { + return nil, fmt.Errorf("failed loading current delegator tx, %w", err) + } + output[staker.TxID] = &stakersData{ + TxBytes: tx.Bytes(), + IsCurrent: false, + PotentialReward: staker.PotentialReward, + } + } + + for _, staker := range validatorDiff.deletedDelegators { + txID := staker.TxID + output[txID] = &stakersData{ + TxBytes: nil, + } + } + } + } + return output, nil +} + +func (ms *merkleState) processPendingStakers() (map[ids.ID]*stakersData, error) { + output := make(map[ids.ID]*stakersData) + for subnetID, subnetValidatorDiffs := range ms.pendingStakers.validatorDiffs { + delete(ms.pendingStakers.validatorDiffs, subnetID) + for _, validatorDiff := range subnetValidatorDiffs { + switch validatorDiff.validatorStatus { + case added: + txID := validatorDiff.validator.TxID + tx, _, err := ms.GetTx(txID) + if err != nil { + return nil, fmt.Errorf("failed loading pending validator tx, %w", err) + } + output[txID] = &stakersData{ + TxBytes: tx.Bytes(), + IsCurrent: false, + PotentialReward: 0, + } + case deleted: + txID := validatorDiff.validator.TxID + output[txID] = &stakersData{ + TxBytes: nil, + } + } + + addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) + defer addedDelegatorIterator.Release() + for addedDelegatorIterator.Next() { + staker := addedDelegatorIterator.Value() + tx, _, err := ms.GetTx(staker.TxID) + if err != nil { + return nil, fmt.Errorf("failed loading pending delegator tx, %w", err) + } + output[staker.TxID] = &stakersData{ + TxBytes: tx.Bytes(), + IsCurrent: false, + PotentialReward: 0, + } + } + + for _, staker := range validatorDiff.deletedDelegators { + txID := staker.TxID + output[txID] = &stakersData{ + TxBytes: nil, + } + } + } + } + return output, nil +} + +func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*stakersData) error { errs := wrappers.Errs{} view, err := ms.merkleDB.NewView() if err != nil { @@ -806,6 +914,8 @@ func (ms *merkleState) writeMerkleState() error { ms.writePermissionedSubnets(view, ctx), ms.writeElasticSubnets(view, ctx), ms.writeChains(view, ctx), + ms.writeCurrentStakers(view, ctx, currentData), + ms.writePendingStakers(view, ctx, pendingData), ms.writeUTXOs(view, ctx), ms.writeRewardUTXOs(view, ctx), ) @@ -837,9 +947,7 @@ func (ms *merkleState) writeMetadata(view merkledb.TrieView, ctx context.Context delete(ms.supplies, subnetID) ms.suppliesCache.Put(subnetID, &supply) - key := make([]byte, 0, len(merkleSuppliesPrefix)+len(subnetID[:])) - copy(key, merkleSuppliesPrefix) - key = append(key, subnetID[:]...) + key := merkleSuppliesKey(subnetID) if err := view.Insert(ctx, key, database.PackUInt64(supply)); err != nil { return fmt.Errorf("failed to write subnet %v supply: %w", subnetID, err) } @@ -849,12 +957,7 @@ func (ms *merkleState) writeMetadata(view merkledb.TrieView, ctx context.Context func (ms *merkleState) writePermissionedSubnets(view merkledb.TrieView, ctx context.Context) error { for _, subnetTx := range ms.addedPermissionedSubnets { - subnetID := subnetTx.ID() - - key := make([]byte, 0, len(permissionedSubnetSectionPrefix)+len(subnetID[:])) - copy(key, permissionedSubnetSectionPrefix) - key = append(key, subnetID[:]...) 
- + key := merklePermissionedSubnetKey(subnetTx.ID()) if err := view.Insert(ctx, key, subnetTx.Bytes()); err != nil { return fmt.Errorf("failed to write subnetTx: %w", err) } @@ -865,12 +968,7 @@ func (ms *merkleState) writeElasticSubnets(view merkledb.TrieView, ctx context.C for _, subnetTx := range ms.addedElasticSubnets { - subnetID := subnetTx.ID() - - key := make([]byte, 0, len(elasticSubnetSectionPrefix)+len(subnetID[:])) - copy(key, elasticSubnetSectionPrefix) - key = append(key, subnetID[:]...) - + key := merkleElasticSubnetKey(subnetTx.ID()) if err := view.Insert(ctx, key, subnetTx.Bytes()); err != nil { return fmt.Errorf("failed to write subnetTx: %w", err) } @@ -881,17 +979,8 @@ func (ms *merkleState) writeChains(view merkledb.TrieView, ctx context.Context) for subnetID, chains := range ms.addedChains { - prefixKey := make([]byte, 0, len(chainsSectionPrefix)+len(subnetID[:])) - copy(prefixKey, chainsSectionPrefix) - prefixKey = append(prefixKey, subnetID[:]...) - for _, chainTx := range chains { - chainID := chainTx.ID() - - key := make([]byte, 0, len(prefixKey)+len(chainID)) - copy(key, prefixKey) - key = append(key, chainID[:]...) - + key := merkleChainKey(subnetID, chainTx.ID()) if err := view.Insert(ctx, key, chainTx.Bytes()); err != nil { return fmt.Errorf("failed to write chain: %w", err) } @@ -901,14 +990,40 @@ func (ms *merkleState) writeChains(view merkledb.TrieView, ctx context.Context) return nil } +func (*merkleState) writeCurrentStakers(view merkledb.TrieView, ctx context.Context, currentData map[ids.ID]*stakersData) error { + for stakerTxID, data := range currentData { + key := merkleCurrentStakersKey(stakerTxID) + + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) + if err != nil { + return fmt.Errorf("failed to serialize current stakers data, stakerTxID %v: %w", stakerTxID, err) + } + if err := view.Insert(ctx, key, dataBytes); err != nil { + return fmt.Errorf("failed to write current stakers data, stakerTxID %v: %w", stakerTxID, err) + } + } + return nil +} + +func (*merkleState) writePendingStakers(view merkledb.TrieView, ctx context.Context, pendingData map[ids.ID]*stakersData) error { + for stakerTxID, data := range pendingData { + key := merklePendingStakersKey(stakerTxID) + + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) + if err != nil { + return fmt.Errorf("failed to serialize pending stakers data, stakerTxID %v: %w", stakerTxID, err) + } + if err := view.Insert(ctx, key, dataBytes); err != nil { + return fmt.Errorf("failed to write pending stakers data, stakerTxID %v: %w", stakerTxID, err) + } + } + return nil +} + func (ms *merkleState) writeUTXOs(view merkledb.TrieView, ctx context.Context) error { for utxoID, utxo := range ms.modifiedUTXOs { delete(ms.modifiedUTXOs, utxoID) - - key := make([]byte, 0, len(utxosSectionPrefix)+len(utxoID)) - copy(key, utxosSectionPrefix) - key = append(key, utxoID[:]...)
- + key := merkleUtxoIDKey(utxoID) if utxo == nil { // delete the UTXO switch _, err := ms.GetUTXO(utxoID); err { case nil: @@ -952,23 +1067,13 @@ func (ms *merkleState) writeRewardUTXOs(view merkledb.TrieView, ctx context.Cont for txID, utxos := range ms.addedRewardUTXOs { delete(ms.addedRewardUTXOs, txID) ms.rewardUTXOsCache.Put(txID, utxos) - - prefix := make([]byte, 0, len(rewardUtxosSectionPrefix)+len(txID)) - copy(prefix, rewardUtxosSectionPrefix) - prefix = append(prefix, txID[:]...) - for _, utxo := range utxos { - utxoID := utxo.InputID() - - key := make([]byte, 0, len(prefix)+len(utxoID)) - copy(key, prefix) - key = append(key, utxoID[:]...) - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) if err != nil { return fmt.Errorf("failed to serialize reward UTXO: %w", err) } + key := merkleRewardUtxoIDKey(txID, utxo.InputID()) if err := view.Insert(ctx, key, utxoBytes); err != nil { return fmt.Errorf("failed to add reward UTXO: %w", err) } @@ -1023,7 +1128,6 @@ func (ms *merkleState) writeTXs() error { } func (ms *merkleState) writeUTXOsIndex(utxo *avax.UTXO, insertUtxo bool) error { - utxoID := utxo.InputID() addressable, ok := utxo.Out.(avax.Addressable) if !ok { return nil @@ -1031,9 +1135,7 @@ func (ms *merkleState) writeUTXOsIndex(utxo *avax.UTXO, insertUtxo bool) error { addresses := addressable.Addresses() for _, addr := range addresses { - key := make([]byte, 0, len(addr)+len(utxoID)) - copy(key, addr) - key = append(key, utxoID[:]...) + key := merkleUtxoIndexKey(addr, utxo.InputID()) if insertUtxo { if err := ms.indexedUTXOsDB.Put(key, nil); err != nil { @@ -1048,12 +1150,10 @@ func (ms *merkleState) writeUTXOsIndex(utxo *avax.UTXO, insertUtxo bool) error { return nil } -func (ms *merkleState) writelocalUptimes() error { +func (ms *merkleState) writeLocalUptimes() error { for vdrID, updatedSubnets := range ms.modifiedLocalUptimes { for subnetID := range updatedSubnets { - key := make([]byte, 0, len(vdrID)+len(subnetID)) - copy(key, vdrID[:]) - key = append(key, subnetID[:]...) + key := merkleLocalUptimesKey(vdrID, subnetID) uptimes := ms.localUptimesCache[vdrID][subnetID] uptimes.LastUpdated = uint64(uptimes.lastUpdated.Unix()) diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go new file mode 100644 index 000000000000..6a079357dee0 --- /dev/null +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -0,0 +1,93 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import "github.com/ava-labs/avalanchego/ids" + +func merkleSuppliesKey(subnetID ids.ID) []byte { + key := make([]byte, 0, len(merkleSuppliesPrefix)+len(subnetID[:])) + copy(key, merkleSuppliesPrefix) + key = append(key, subnetID[:]...) + return key +} + +func merklePermissionedSubnetKey(subnetID ids.ID) []byte { + key := make([]byte, 0, len(permissionedSubnetSectionPrefix)+len(subnetID[:])) + copy(key, permissionedSubnetSectionPrefix) + key = append(key, subnetID[:]...) + return key +} + +func merkleElasticSubnetKey(subnetID ids.ID) []byte { + key := make([]byte, 0, len(elasticSubnetSectionPrefix)+len(subnetID[:])) + copy(key, elasticSubnetSectionPrefix) + key = append(key, subnetID[:]...) + return key +} + +func merkleChainPrefix(subnetID ids.ID) []byte { + prefix := make([]byte, 0, len(chainsSectionPrefix)+len(subnetID[:])) + copy(prefix, chainsSectionPrefix) + prefix = append(prefix, subnetID[:]...) 
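+ // Prefix layout: chainsSectionPrefix | subnetID; merkleChainKey below appends the chainID to this prefix.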
+ return prefix +} + +func merkleChainKey(subnetID ids.ID, chainID ids.ID) []byte { + prefix := merkleChainPrefix(subnetID) + + key := make([]byte, 0, len(prefix)+len(chainID)) + copy(key, prefix) + key = append(key, chainID[:]...) + return key +} + +func merkleUtxoIDKey(utxoID ids.ID) []byte { + key := make([]byte, 0, len(utxosSectionPrefix)+len(utxoID)) + copy(key, utxosSectionPrefix) + key = append(key, utxoID[:]...) + return key +} + +func merkleRewardUtxosIDPrefix(txID ids.ID) []byte { + prefix := make([]byte, 0, len(rewardUtxosSectionPrefix)+len(txID)) + copy(prefix, rewardUtxosSectionPrefix) + prefix = append(prefix, txID[:]...) + return prefix +} + +func merkleRewardUtxoIDKey(txID, utxoID ids.ID) []byte { + prefix := merkleRewardUtxosIDPrefix(txID) + key := make([]byte, 0, len(prefix)+len(utxoID)) + copy(key, prefix) + key = append(key, utxoID[:]...) + return key +} + +func merkleUtxoIndexKey(address []byte, utxoID ids.ID) []byte { + key := make([]byte, 0, len(address)+len(utxoID)) + copy(key, address) + key = append(key, utxoID[:]...) + return key +} + +func merkleLocalUptimesKey(nodeID ids.NodeID, subnetID ids.ID) []byte { + key := make([]byte, 0, len(nodeID)+len(subnetID)) + copy(key, nodeID[:]) + key = append(key, subnetID[:]...) + return key +} + +func merkleCurrentStakersKey(txID ids.ID) []byte { + key := make([]byte, 0, len(currentStakersSectionPrefix)+len(txID)) + copy(key, currentStakersSectionPrefix) + key = append(key, txID[:]...) + return key +} + +func merklePendingStakersKey(txID ids.ID) []byte { + key := make([]byte, 0, len(pendingStakersSectionPrefix)+len(txID)) + copy(key, pendingStakersSectionPrefix) + key = append(key, txID[:]...) + return key +} From eabc916d5f2b8e927764e4ddb4740d43ebee1161 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 21 Jul 2023 19:09:33 +0200 Subject: [PATCH 056/132] wip: some more work on merkleDB --- vms/platformvm/state/merkle_state.go | 54 ++++++++++++++++++-- vms/platformvm/state/merkle_state_helpers.go | 8 +++ 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index cae1f2a9f24c..038e8446c315 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -60,6 +60,7 @@ var ( rewardUtxosSectionPrefix = []byte{0x5} currentStakersSectionPrefix = []byte{0x6} pendingStakersSectionPrefix = []byte{0x7} + delegateeRewardsPrefix = []byte{0x8} ) func NewMerkleState( @@ -153,6 +154,9 @@ func NewMerkleState( currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), + delegateeRewardCache: make(map[ids.NodeID]map[ids.ID]uint64), + modifiedDelegateeReward: make(map[ids.NodeID]set.Set[ids.ID]), + modifiedUTXOs: make(map[ids.ID]*avax.UTXO), utxoCache: &cache.LRU[ids.ID, *avax.UTXO]{Size: utxoCacheSize}, addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), @@ -196,6 +200,9 @@ type merkleState struct { currentStakers *baseStakers pendingStakers *baseStakers + delegateeRewardCache map[ids.NodeID]map[ids.ID]uint64 + modifiedDelegateeReward map[ids.NodeID]set.Set[ids.ID] + // UTXOs section modifiedUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO utxoCache cache.Cacher[ids.ID, *avax.UTXO] // UTXO ID -> *UTXO. 
If the *UTXO is nil the UTXO doesn't exist @@ -304,12 +311,34 @@ func (ms *merkleState) GetPendingStakerIterator() (StakerIterator, error) { return ms.pendingStakers.GetStakerIterator(), nil } -func (*merkleState) GetDelegateeReward( /*subnetID*/ ids.ID /*vdrID*/, ids.NodeID) (amount uint64, err error) { - return 0, errNotYetImplemented +func (ms *merkleState) GetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID) (uint64, error) { + nodeDelegateeRewards, exists := ms.delegateeRewardCache[vdrID] + if !exists { + return 0, database.ErrNotFound + } + delegateeReward, exists := nodeDelegateeRewards[subnetID] + if !exists { + return 0, database.ErrNotFound + } + return delegateeReward, nil } -func (*merkleState) SetDelegateeReward( /*subnetID*/ ids.ID /*vdrID*/, ids.NodeID /*amount*/, uint64) error { - return errNotYetImplemented +func (ms *merkleState) SetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID, amount uint64) error { + nodeDelegateeRewards, exists := ms.delegateeRewardCache[vdrID] + if !exists { + nodeDelegateeRewards = make(map[ids.ID]uint64) + ms.delegateeRewardCache[vdrID] = nodeDelegateeRewards + } + nodeDelegateeRewards[subnetID] = amount + + // track diff + updatedDelegateeRewards, ok := ms.modifiedDelegateeReward[vdrID] + if !ok { + updatedDelegateeRewards = set.Set[ids.ID]{} + ms.modifiedDelegateeReward[vdrID] = updatedDelegateeRewards + } + updatedDelegateeRewards.Add(subnetID) + return nil } // UTXOs section @@ -916,6 +945,7 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta ms.writeChains(view, ctx), ms.writeCurrentStakers(view, ctx, currentData), ms.writePendingStakers(view, ctx, pendingData), + ms.writeDelegateeRewards(view, ctx), ms.writeUTXOs(view, ctx), ms.writeRewardUTXOs(view, ctx), ) @@ -1082,6 +1112,22 @@ func (ms *merkleState) writeRewardUTXOs(view merkledb.TrieView, ctx context.Cont return nil } +func (ms *merkleState) writeDelegateeRewards(view merkledb.TrieView, ctx context.Context) error { + for nodeID, nodeDelegateeRewards := range ms.modifiedDelegateeReward { + nodeDelegateeRewardsList := nodeDelegateeRewards.List() + for _, subnetID := range nodeDelegateeRewardsList { + delegateeReward := ms.delegateeRewardCache[nodeID][subnetID] + + key := merkleDelegateeRewardsKey(nodeID, subnetID) + if err := view.Insert(ctx, key, database.PackUInt64(delegateeReward)); err != nil { + return fmt.Errorf("failed to write delegatee reward: %w", err) + } + } + delete(ms.modifiedDelegateeReward, nodeID) + } + return nil +} + func (ms *merkleState) writeBlocks() error { for blkID, blk := range ms.addedBlocks { blkID := blkID diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 6a079357dee0..7f91da2cf636 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -91,3 +91,11 @@ func merklePendingStakersKey(txID ids.ID) []byte { key = append(key, txID[:]...) return key } + +func merkleDelegateeRewardsKey(nodeID ids.NodeID, subnetID ids.ID) []byte { + key := make([]byte, 0, len(delegateeRewardsPrefix)+len(nodeID)+len(subnetID)) + copy(key, delegateeRewardsPrefix) + key = append(key, nodeID[:]...) + key = append(key, subnetID[:]...)
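+ // Key layout: delegateeRewardsPrefix | nodeID | subnetID.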
+ return key +} From ab0b712b256fc3db4062e8e6935f80d74c9b9d47 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 21 Jul 2023 21:14:53 +0200 Subject: [PATCH 057/132] wip: some more work on merkleDB --- vms/platformvm/state/merkle_state.go | 282 +++++++++++++++++-- vms/platformvm/state/merkle_state_helpers.go | 54 +++- 2 files changed, 308 insertions(+), 28 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 038e8446c315..af5240ae9de4 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -5,7 +5,6 @@ package state import ( "context" - "errors" "fmt" "time" @@ -19,11 +18,13 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/x/merkledb" @@ -39,13 +40,13 @@ const ( var ( _ State = (*merkleState)(nil) - errNotYetImplemented = errors.New("not yet implemented") - merkleStatePrefix = []byte{0x0} merkleBlockPrefix = []byte{0x1} merkleTxPrefix = []byte{0x2} merkleIndexUTXOsPrefix = []byte{0x3} // to serve UTXOIDs(addr) - merkleUptimesPrefix = []byte{0x4} + merkleUptimesPrefix = []byte{0x4} // locally measured uptimes + merkleWeightDiffPrefix = []byte{0x5} // non-merklelized validators weight diff. TODO: should we merklelize them? 
+ merkleBlsKeyDiffPrefix = []byte{0x6} // merkle db sections metadataSectionPrefix = []byte{0x0} @@ -64,16 +65,19 @@ var ( ) func NewMerkleState( + cfg *config.Config, rawDB database.Database, metricsReg prometheus.Registerer, ) (Chain, error) { var ( - baseDB = versiondb.New(rawDB) - baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) - blockDB = prefixdb.New(merkleBlockPrefix, baseDB) - txDB = prefixdb.New(merkleTxPrefix, baseDB) - indexedUTXOsDB = prefixdb.New(merkleIndexUTXOsPrefix, baseDB) - localUptimesDB = prefixdb.New(merkleUptimesPrefix, baseDB) + baseDB = versiondb.New(rawDB) + baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) + blockDB = prefixdb.New(merkleBlockPrefix, baseDB) + txDB = prefixdb.New(merkleTxPrefix, baseDB) + indexedUTXOsDB = prefixdb.New(merkleIndexUTXOsPrefix, baseDB) + localUptimesDB = prefixdb.New(merkleUptimesPrefix, baseDB) + localWeightDiffDB = prefixdb.New(merkleWeightDiffPrefix, baseDB) + localBlsKeyDiffDB = prefixdb.New(merkleBlsKeyDiffPrefix, baseDB) ) ctx := context.TODO() @@ -146,7 +150,26 @@ func NewMerkleState( return nil, err } + validatorWeightDiffsCache, err := metercacher.New[heightWithSubnet, map[ids.NodeID]*ValidatorWeightDiff]( + "validator_weight_diffs_cache", + metricsReg, + &cache.LRU[heightWithSubnet, map[ids.NodeID]*ValidatorWeightDiff]{Size: validatorDiffsCacheSize}, + ) + if err != nil { + return nil, err + } + + validatorBlsKeyDiffsCache, err := metercacher.New[uint64, map[ids.NodeID]*bls.PublicKey]( + "validator_pub_key_diffs_cache", + metricsReg, + &cache.LRU[uint64, map[ids.NodeID]*bls.PublicKey]{Size: validatorDiffsCacheSize}, + ) + if err != nil { + return nil, err + } + res := &merkleState{ + cfg: cfg, baseDB: baseDB, baseMerkleDB: baseMerkleDB, merkleDB: merkleDB, @@ -186,11 +209,19 @@ func NewMerkleState( localUptimesCache: make(map[ids.NodeID]map[ids.ID]*uptimes), modifiedLocalUptimes: make(map[ids.NodeID]set.Set[ids.ID]), localUptimesDB: localUptimesDB, + + validatorWeightDiffsCache: validatorWeightDiffsCache, + localWeightDiffDB: localWeightDiffDB, + + validatorBlsKeyDiffsCache: validatorBlsKeyDiffsCache, + localBlsKeyDiffDB: localBlsKeyDiffDB, } return res, nil } type merkleState struct { + cfg *config.Config + baseDB *versiondb.Database baseMerkleDB database.Database merkleDB merkledb.MerkleDB // meklelized state @@ -244,6 +275,12 @@ type merkleState struct { localUptimesCache map[ids.NodeID]map[ids.ID]*uptimes // vdrID -> subnetID -> metadata modifiedLocalUptimes map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs localUptimesDB database.Database + + validatorWeightDiffsCache cache.Cacher[heightWithSubnet, map[ids.NodeID]*ValidatorWeightDiff] // heightWithSubnet -> map[ids.NodeID]*ValidatorWeightDiff + localWeightDiffDB database.Database + + validatorBlsKeyDiffsCache cache.Cacher[uint64, map[ids.NodeID]*bls.PublicKey] // cache of height -> map[ids.NodeID]*bls.PublicKey + localBlsKeyDiffDB database.Database } type uptimes struct { @@ -753,12 +790,62 @@ func (ms *merkleState) ValidatorSet(subnetID ids.ID, vdrs validators.Set) error return nil } -func (*merkleState) GetValidatorWeightDiffs( /*height*/ uint64 /*subnetID*/, ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) { - return nil, fmt.Errorf("MerkleDB GetValidatorWeightDiffs: %w", errNotYetImplemented) +// TODO: very inefficient implementation until ValidatorDiff optimization is merged in +func (ms *merkleState) GetValidatorWeightDiffs(height uint64, subnetID ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) { + cacheKey := heightWithSubnet{ + Height: 
 height, + SubnetID: subnetID, + } + if weightDiffs, ok := ms.validatorWeightDiffsCache.Get(cacheKey); ok { + return weightDiffs, nil + } + + // not in cache; rebuild this height's diffs from the database + res := make(map[ids.NodeID]*ValidatorWeightDiff) + iter := ms.localWeightDiffDB.NewIteratorWithPrefix(subnetID[:]) + defer iter.Release() + for iter.Next() { + keyBytes := iter.Key() + _, nodeID, retrievedHeight := splitMerkleWeightDiffKey(keyBytes) + if retrievedHeight != height { + continue // loop them all, we'll worry about efficiency after correctness + } + + val := &ValidatorWeightDiff{} + if _, err := blocks.GenesisCodec.Unmarshal(iter.Value(), val); err != nil { + return nil, err + } + + res[nodeID] = val + } + return res, iter.Error() } -func (*merkleState) GetValidatorPublicKeyDiffs( /*height*/ uint64) (map[ids.NodeID]*bls.PublicKey, error) { - return nil, fmt.Errorf("MerkleDB GetValidatorPublicKeyDiffs: %w", errNotYetImplemented) +// TODO: very inefficient implementation until ValidatorDiff optimization is merged in +func (ms *merkleState) GetValidatorPublicKeyDiffs(height uint64) (map[ids.NodeID]*bls.PublicKey, error) { + if blsKeyDiffs, ok := ms.validatorBlsKeyDiffsCache.Get(height); ok { + return blsKeyDiffs, nil + } + + // not in cache; rebuild this height's diffs from the database + res := make(map[ids.NodeID]*bls.PublicKey) + iter := ms.localBlsKeyDiffDB.NewIterator() + defer iter.Release() + for iter.Next() { + keyBytes := iter.Key() + nodeID, retrievedHeight := splitMerkleBlsKeyDiffKey(keyBytes) + if retrievedHeight != height { + continue // loop them all, we'll worry about efficiency after correctness + } + + pkBytes := iter.Value() + val, err := bls.PublicKeyFromBytes(pkBytes) + if err != nil { + return nil, err + } + res[nodeID] = val + } + return res, iter.Error() } // DB Operations @@ -801,8 +888,8 @@ func (ms *merkleState) Close() error { return errs.Err } -func (ms *merkleState) write( /*updateValidators*/ bool /*height*/, uint64) error { - currentData, err := ms.processCurrentStakers() +func (ms *merkleState) write(updateValidators bool, height uint64) error { + currentData, weightDiffs, blsKeyDiffs, valSetDiff, err := ms.processCurrentStakers() if err != nil { return err } @@ -817,6 +904,9 @@ func (ms *merkleState) write( /*updateValidators*/ bool /*height*/, uint64) erro ms.writeBlocks(), ms.writeTXs(), ms.writeLocalUptimes(), + ms.writeWeightDiffs(height, weightDiffs), + ms.writeBlsKeyDiffs(height, blsKeyDiffs), + ms.updateValidatorSet(updateValidators, valSetDiff, weightDiffs), ) return errs.Err } @@ -827,31 +917,73 @@ type stakersData struct { PotentialReward uint64 `serialize:"true"` } -func (ms *merkleState) processCurrentStakers() (map[ids.ID]*stakersData, error) { - output := make(map[ids.ID]*stakersData) +type weightDiffKey struct { + subnetID ids.ID + nodeID ids.NodeID +} + +func (ms *merkleState) processCurrentStakers() ( + map[ids.ID]*stakersData, + map[weightDiffKey]*ValidatorWeightDiff, + map[ids.NodeID]*bls.PublicKey, + map[weightDiffKey]*diffValidator, + error, +) { + var ( + outputStakers = make(map[ids.ID]*stakersData) + outputWeights = make(map[weightDiffKey]*ValidatorWeightDiff) + outputBlsKey = make(map[ids.NodeID]*bls.PublicKey) + outputValSet = make(map[weightDiffKey]*diffValidator) + ) + for subnetID, subnetValidatorDiffs := range ms.currentStakers.validatorDiffs { delete(ms.currentStakers.validatorDiffs, subnetID) - for _, validatorDiff := range subnetValidatorDiffs { + for nodeID, validatorDiff := range subnetValidatorDiffs { + weightKey := weightDiffKey{ + subnetID: subnetID, + nodeID: nodeID, + } + 
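// Keep the full diff as well: updateValidatorSet later needs the staker's public key and txID, not just the weight delta. +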
outputValSet[weightKey] = validatorDiff + switch validatorDiff.validatorStatus { case added: var ( txID = validatorDiff.validator.TxID potentialReward = validatorDiff.validator.PotentialReward + weight = validatorDiff.validator.Weight ) tx, _, err := ms.GetTx(txID) if err != nil { - return nil, fmt.Errorf("failed loading current validator tx, %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed loading current validator tx, %w", err) } - output[txID] = &stakersData{ + + outputStakers[txID] = &stakersData{ TxBytes: tx.Bytes(), IsCurrent: false, PotentialReward: potentialReward, } + outputWeights[weightKey] = &ValidatorWeightDiff{ + Decrease: false, + Amount: weight, + } + case deleted: - txID := validatorDiff.validator.TxID - output[txID] = &stakersData{ + var ( + txID = validatorDiff.validator.TxID + weight = validatorDiff.validator.Weight + blsKey = validatorDiff.validator.PublicKey + ) + + outputStakers[txID] = &stakersData{ TxBytes: nil, } + outputWeights[weightKey] = &ValidatorWeightDiff{ + Decrease: true, + Amount: weight, + } + if blsKey != nil { + outputBlsKey[nodeID] = blsKey + } } addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) @@ -860,24 +992,32 @@ func (ms *merkleState) processCurrentStakers() (map[ids.ID]*stakersData, error) staker := addedDelegatorIterator.Value() tx, _, err := ms.GetTx(staker.TxID) if err != nil { - return nil, fmt.Errorf("failed loading current delegator tx, %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed loading current delegator tx, %w", err) } - output[staker.TxID] = &stakersData{ + + outputStakers[staker.TxID] = &stakersData{ TxBytes: tx.Bytes(), IsCurrent: false, PotentialReward: staker.PotentialReward, } + if err := outputWeights[weightKey].Add(false, staker.Weight); err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to increase node weight diff: %w", err) + } } for _, staker := range validatorDiff.deletedDelegators { txID := staker.TxID - output[txID] = &stakersData{ + + outputStakers[txID] = &stakersData{ TxBytes: nil, } + if err := outputWeights[weightKey].Add(true, staker.Weight); err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to decrease node weight diff: %w", err) + } } } } - return output, nil + return outputStakers, outputWeights, outputBlsKey, outputValSet, nil } func (ms *merkleState) processPendingStakers() (map[ids.ID]*stakersData, error) { @@ -1216,3 +1356,91 @@ func (ms *merkleState) writeLocalUptimes() error { } return nil } + +func (ms *merkleState) writeWeightDiffs(height uint64, weightDiffs map[weightDiffKey]*ValidatorWeightDiff) error { + for weightKey, weightDiff := range weightDiffs { + if weightDiff.Amount == 0 { + // No weight change to record; go to next validator. 
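+ // (Skipping zero diffs also keeps empty entries out of localWeightDiffDB and its cache.)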
+ continue + } + + key := merkleWeightDiffKey(weightKey.subnetID, weightKey.nodeID, height) + weightDiffBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, weightDiff) + if err != nil { + return fmt.Errorf("failed to serialize validator weight diff: %w", err) + } + + if err := ms.localWeightDiffDB.Put(key, weightDiffBytes); err != nil { + return fmt.Errorf("failed to add weight diffs: %w", err) + } + + // update the cache + cacheKey := heightWithSubnet{ + Height: height, + SubnetID: weightKey.subnetID, + } + cacheValue := map[ids.NodeID]*ValidatorWeightDiff{ + weightKey.nodeID: weightDiff, + } + ms.validatorWeightDiffsCache.Put(cacheKey, cacheValue) + } + return nil +} + +func (ms *merkleState) writeBlsKeyDiffs(height uint64, blsKeyDiffs map[ids.NodeID]*bls.PublicKey) error { + for nodeID, blsKey := range blsKeyDiffs { + key := merkleBlsKeyDiffKey(nodeID, height) + blsKeyBytes := bls.PublicKeyToBytes(blsKey) + + if err := ms.localBlsKeyDiffDB.Put(key, blsKeyBytes); err != nil { + return fmt.Errorf("failed to add bls key diffs: %w", err) + } + } + return nil +} + +func (ms *merkleState) updateValidatorSet( + updateValidators bool, + valSetDiff map[weightDiffKey]*diffValidator, + weightDiffs map[weightDiffKey]*ValidatorWeightDiff, +) error { + if !updateValidators { + return nil + } + + for weightKey, weightDiff := range weightDiffs { + var ( + subnetID = weightKey.subnetID + nodeID = weightKey.nodeID + validatorDiff = valSetDiff[weightKey] + err error + ) + + // We only track the current validator set of tracked subnets. + if subnetID != constants.PrimaryNetworkID && !ms.cfg.TrackedSubnets.Contains(subnetID) { + continue + } + + if weightDiff.Decrease { + err = validators.RemoveWeight(ms.cfg.Validators, subnetID, nodeID, weightDiff.Amount) + } else { + if validatorDiff.validatorStatus == added { + staker := validatorDiff.validator + err = validators.Add( + ms.cfg.Validators, + subnetID, + nodeID, + staker.PublicKey, + staker.TxID, + weightDiff.Amount, + ) + } else { + err = validators.AddWeight(ms.cfg.Validators, subnetID, nodeID, weightDiff.Amount) + } + } + if err != nil { + return fmt.Errorf("failed to update validator weight: %w", err) + } + } + return nil +} diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 7f91da2cf636..354a9d1672de 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -3,7 +3,10 @@ package state -import "github.com/ava-labs/avalanchego/ids" +import ( + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" +) func merkleSuppliesKey(subnetID ids.ID) []byte { key := make([]byte, 0, len(merkleSuppliesPrefix)+len(subnetID[:])) @@ -99,3 +102,52 @@ func merklePendingStakersKey(txID ids.ID) []byte { key = append(key, txID[:]...) return key } + +func merkleWeightDiffKey(subnetID ids.ID, nodeID ids.NodeID, height uint64) []byte { + key := make([]byte, 0, len(nodeID)+len(subnetID)) // missing height part + key = append(key, subnetID[:]...) + key = append(key, nodeID[:]...) + key = append(key, database.PackUInt64(height)...) 
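+ // Resulting key layout: subnetID (32 bytes) | nodeID (20 bytes) | packed height (8 bytes); splitMerkleWeightDiffKey below assumes these fixed widths.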
+ return key +} + +// TODO: remove when ValidatorDiff optimization is merged in +func splitMerkleWeightDiffKey(key []byte) (ids.ID, ids.NodeID, uint64) { + subnetIDLength := 32 + nodeIDLength := 20 + + subnetID := ids.Empty + copy(subnetID[:], key[0:subnetIDLength]) + + nodeID := ids.EmptyNodeID + copy(nodeID[:], key[subnetIDLength:subnetIDLength+nodeIDLength]) + + height, err := database.ParseUInt64(key[subnetIDLength+nodeIDLength:]) + if err != nil { + panic("failed splitting MerkleWeightDiffKey") + } + + return subnetID, nodeID, height +} + +func merkleBlsKeyDiffKey(nodeID ids.NodeID, height uint64) []byte { + key := make([]byte, 0, len(nodeID)) // missing height part + key = append(key, nodeID[:]...) + key = append(key, database.PackUInt64(height)...) + return key +} + +// TODO: remove when ValidatorDiff optimization is merged in +func splitMerkleBlsKeyDiffKey(key []byte) (ids.NodeID, uint64) { + nodeIDLength := 20 + + nodeID := ids.EmptyNodeID + copy(nodeID[:], key[0:nodeIDLength]) + + height, err := database.ParseUInt64(key[nodeIDLength:]) + if err != nil { + panic("failed splitting MerkleBlsKeyDiffKey") + } + + return nodeID, height +} From dffd68d77f59f2cef38944bfb8d563cef5637872 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 21 Jul 2023 23:18:43 +0200 Subject: [PATCH 058/132] wip: coded sync genesis --- vms/platformvm/state/merkle_state.go | 41 +++-- vms/platformvm/state/merkle_state_load_ops.go | 143 ++++++++++++++++++ 2 files changed, 173 insertions(+), 11 deletions(-) create mode 100644 vms/platformvm/state/merkle_state_load_ops.go diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 038e8446c315..1441b98d70cd 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/constants" @@ -25,6 +26,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/x/merkledb" @@ -41,12 +43,13 @@ var ( _ State = (*merkleState)(nil) merkleStatePrefix = []byte{0x0} - merkleBlockPrefix = []byte{0x1} - merkleTxPrefix = []byte{0x2} - merkleIndexUTXOsPrefix = []byte{0x3} // to serve UTXOIDs(addr) - merkleUptimesPrefix = []byte{0x4} // locally measured uptimes - merkleWeightDiffPrefix = []byte{0x5} // non-merklelized validators weight diff. TODO: should we merklelize them? - merkleBlsKeyDiffPrefix = []byte{0x6} + merkleSingletonPrefix = []byte{0x1} + merkleBlockPrefix = []byte{0x2} + merkleTxPrefix = []byte{0x3} + merkleIndexUTXOsPrefix = []byte{0x4} // to serve UTXOIDs(addr) + merkleUptimesPrefix = []byte{0x5} // locally measured uptimes + merkleWeightDiffPrefix = []byte{0x6} // non-merklelized validators weight diff. TODO: should we merklelize them? 
+ merkleBlsKeyDiffPrefix = []byte{0x7} // merkle db sections metadataSectionPrefix = []byte{0x0} @@ -65,13 +68,17 @@ var ( ) func NewMerkleState( - cfg *config.Config, rawDB database.Database, + genesisBytes []byte, + cfg *config.Config, + ctx *snow.Context, metricsReg prometheus.Registerer, + rewards reward.Calculator, ) (Chain, error) { var ( baseDB = versiondb.New(rawDB) baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) + singletonDB = prefixdb.New(merkleSingletonPrefix, baseDB) blockDB = prefixdb.New(merkleBlockPrefix, baseDB) txDB = prefixdb.New(merkleTxPrefix, baseDB) indexedUTXOsDB = prefixdb.New(merkleIndexUTXOsPrefix, baseDB) @@ -80,13 +87,13 @@ func NewMerkleState( localBlsKeyDiffDB = prefixdb.New(merkleBlsKeyDiffPrefix, baseDB) ) - ctx := context.TODO() + traceCtx := context.TODO() noOpTracer, err := trace.New(trace.Config{Enabled: false}) if err != nil { return nil, fmt.Errorf("failed creating noOpTraces: %w", err) } - merkleDB, err := merkledb.New(ctx, baseMerkleDB, merkledb.Config{ + merkleDB, err := merkledb.New(traceCtx, baseMerkleDB, merkledb.Config{ HistoryLength: HistoryLength, NodeCacheSize: NodeCacheSize, Reg: prometheus.NewRegistry(), @@ -170,9 +177,12 @@ func NewMerkleState( res := &merkleState{ cfg: cfg, + ctx: ctx, + rewards: rewards, baseDB: baseDB, baseMerkleDB: baseMerkleDB, merkleDB: merkleDB, + singletonDB: singletonDB, currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), @@ -216,13 +226,23 @@ func NewMerkleState( validatorBlsKeyDiffsCache: validatorBlsKeyDiffsCache, localBlsKeyDiffDB: localBlsKeyDiffDB, } + + if err := res.sync(genesisBytes); err != nil { + // Drop any errors on close to return the first error + _ = res.Close() + return nil, err + } + return res, nil } type merkleState struct { - cfg *config.Config + cfg *config.Config + ctx *snow.Context + rewards reward.Calculator baseDB *versiondb.Database + singletonDB database.Database baseMerkleDB database.Database merkleDB merkledb.MerkleDB // meklelized state @@ -282,7 +302,6 @@ type merkleState struct { validatorBlsKeyDiffsCache cache.Cacher[uint64, map[ids.NodeID]*bls.PublicKey] // cache of height -> map[ids.NodeID]*bls.PublicKey localBlsKeyDiffDB database.Database } - type uptimes struct { Duration time.Duration `serialize:"true"` LastUpdated uint64 `serialize:"true"` // Unix time in seconds diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go new file mode 100644 index 000000000000..a4be8031a7e4 --- /dev/null +++ b/vms/platformvm/state/merkle_state_load_ops.go @@ -0,0 +1,143 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
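+// This file collects the genesis-sync routines used to initialize the merkle-backed state.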
+ +package state + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/genesis" + "github.com/ava-labs/avalanchego/vms/platformvm/status" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +func (ms *merkleState) sync(genesis []byte) error { + shouldInit, err := ms.shouldInit() + if err != nil { + return fmt.Errorf( + "failed to check if the database is initialized: %w", + err, + ) + } + + // If the database is empty, create the platform chain anew using the + // provided genesis state + if shouldInit { + if err := ms.init(genesis); err != nil { + return fmt.Errorf( + "failed to initialize the database: %w", + err, + ) + } + } + + return nil /*ms.load()*/ +} + +func (ms *merkleState) shouldInit() (bool, error) { + has, err := ms.singletonDB.Has(initializedKey) + return !has, err +} + +func (ms *merkleState) doneInit() error { + return ms.singletonDB.Put(initializedKey, nil) +} + +func (ms *merkleState) init(genesisBytes []byte) error { + // Create the genesis block and save it as being accepted (We don't do + // genesisBlock.Accept() because then it'd look for genesisBlock's + // non-existent parent) + genesisID := hashing.ComputeHash256Array(genesisBytes) + genesisBlock, err := blocks.NewApricotCommitBlock(genesisID, 0 /*height*/) + if err != nil { + return err + } + + genesisState, err := genesis.ParseState(genesisBytes) + if err != nil { + return err + } + if err := ms.syncGenesis(genesisBlock, genesisState); err != nil { + return err + } + + if err := ms.doneInit(); err != nil { + return err + } + + return ms.Commit() +} + +func (ms *merkleState) syncGenesis(genesisBlk blocks.Block, genesis *genesis.State) error { + genesisBlkID := genesisBlk.ID() + ms.SetLastAccepted(genesisBlkID) + ms.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0)) + ms.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply) + ms.AddStatelessBlock(genesisBlk) + + // Persist UTXOs that exist at genesis + for _, utxo := range genesis.UTXOs { + ms.AddUTXO(utxo) + } + + // Persist primary network validator set at genesis + for _, vdrTx := range genesis.Validators { + tx, ok := vdrTx.Unsigned.(*txs.AddValidatorTx) + if !ok { + return fmt.Errorf("expected tx type *txs.AddValidatorTx but got %T", vdrTx.Unsigned) + } + + stakeAmount := tx.Validator.Wght + stakeDuration := tx.Validator.Duration() + currentSupply, err := ms.GetCurrentSupply(constants.PrimaryNetworkID) + if err != nil { + return err + } + + potentialReward := ms.rewards.Calculate( + stakeDuration, + stakeAmount, + currentSupply, + ) + newCurrentSupply, err := math.Add64(currentSupply, potentialReward) + if err != nil { + return err + } + + staker, err := NewCurrentStaker(vdrTx.ID(), tx, potentialReward) + if err != nil { + return err + } + + ms.PutCurrentValidator(staker) + ms.AddTx(vdrTx, status.Committed) + ms.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply) + } + + for _, chain := range genesis.Chains { + unsignedChain, ok := chain.Unsigned.(*txs.CreateChainTx) + if !ok { + return fmt.Errorf("expected tx type *txs.CreateChainTx but got %T", chain.Unsigned) + } + + // Ensure all chains that the genesis bytes say to create have the right + // network ID + if unsignedChain.NetworkID != ms.ctx.NetworkID { + return avax.ErrWrongNetworkID + 
} + + ms.AddChain(chain) + ms.AddTx(chain, status.Committed) + } + + // updateValidators is set to false here to maintain the invariant that the + // primary network's validator set is empty before the validator sets are + // initialized. + return ms.write(false /*=updateValidators*/, 0) +} From 95037b542807b7415493011a71a2c03d5d05ef0a Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 21 Jul 2023 23:30:43 +0200 Subject: [PATCH 059/132] wip: load up data from disk --- vms/platformvm/state/merkle_state.go | 78 ++++---- vms/platformvm/state/merkle_state_helpers.go | 53 +++-- vms/platformvm/state/merkle_state_load_ops.go | 188 +++++++++++++++++- 3 files changed, 260 insertions(+), 59 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 1441b98d70cd..b73ad5de2690 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" @@ -26,6 +27,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -69,11 +71,13 @@ var ( func NewMerkleState( rawDB database.Database, + metrics metrics.Metrics, genesisBytes []byte, cfg *config.Config, ctx *snow.Context, metricsReg prometheus.Registerer, rewards reward.Calculator, + bootstrapped *utils.Atomic[bool], ) (Chain, error) { var ( baseDB = versiondb.New(rawDB) @@ -178,7 +182,9 @@ func NewMerkleState( res := &merkleState{ cfg: cfg, ctx: ctx, + metrics: metrics, rewards: rewards, + bootstrapped: bootstrapped, baseDB: baseDB, baseMerkleDB: baseMerkleDB, merkleDB: merkleDB, @@ -237,9 +243,11 @@ func NewMerkleState( } type merkleState struct { - cfg *config.Config - ctx *snow.Context - rewards reward.Calculator + cfg *config.Config + ctx *snow.Context + metrics metrics.Metrics + rewards reward.Calculator + bootstrapped *utils.Atomic[bool] baseDB *versiondb.Database singletonDB database.Database @@ -302,13 +310,6 @@ type merkleState struct { validatorBlsKeyDiffsCache cache.Cacher[uint64, map[ids.NodeID]*bls.PublicKey] // cache of height -> map[ids.NodeID]*bls.PublicKey localBlsKeyDiffDB database.Database } -type uptimes struct { - Duration time.Duration `serialize:"true"` - LastUpdated uint64 `serialize:"true"` // Unix time in seconds - - // txID ids.ID // TODO ABENEGIA: is it needed by delegators and not validators? 
- lastUpdated time.Time -} // STAKERS section func (ms *merkleState) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { @@ -824,18 +825,20 @@ func (ms *merkleState) GetValidatorWeightDiffs(height uint64, subnetID ids.ID) ( iter := ms.localWeightDiffDB.NewIteratorWithPrefix(subnetID[:]) defer iter.Release() for iter.Next() { - keyBytes := iter.Key() - _, nodeID, retrievedHeight := splitMerkleWeightDiffKey(keyBytes) - if retrievedHeight != height { + _, nodeID, retrievedHeight, err := splitMerkleWeightDiffKey(iter.Key()) + switch { + case err != nil: + return nil, err + case retrievedHeight != height: continue // loop them all, we'll worry about efficiency after correctness - } + default: + val := &ValidatorWeightDiff{} + if _, err := blocks.GenesisCodec.Unmarshal(iter.Value(), val); err != nil { + return nil, err + } - val := &ValidatorWeightDiff{} - if _, err := blocks.GenesisCodec.Unmarshal(iter.Value(), val); err != nil { - return nil, err + res[nodeID] = val } - - res[nodeID] = val } return res, iter.Error() } @@ -851,18 +854,20 @@ func (ms *merkleState) GetValidatorPublicKeyDiffs(height uint64) (map[ids.NodeID iter := ms.localBlsKeyDiffDB.NewIterator() defer iter.Release() for iter.Next() { - keyBytes := iter.Key() - nodeID, retrievedHeight := splitMerkleBlsKeyDiffKey(keyBytes) - if retrievedHeight != height { - continue // loop them all, we'll worry about efficiency after correctness - } - - pkBytes := iter.Value() - val, err := bls.PublicKeyFromBytes(pkBytes) - if err != nil { + nodeID, retrievedHeight, err := splitMerkleBlsKeyDiffKey(iter.Key()) + switch { + case err != nil: return nil, err + case retrievedHeight != height: + continue // loop them all, we'll worry about efficiency after correctness + default: + pkBytes := iter.Value() + val, err := bls.PublicKeyFromBytes(pkBytes) + if err != nil { + return nil, err + } + res[nodeID] = val } - res[nodeID] = val } return res, iter.Error() } @@ -930,17 +935,6 @@ func (ms *merkleState) write(updateValidators bool, height uint64) error { return errs.Err } -type stakersData struct { - TxBytes []byte `serialize:"true"` - IsCurrent bool `serialize:"true"` - PotentialReward uint64 `serialize:"true"` -} - -type weightDiffKey struct { - subnetID ids.ID - nodeID ids.NodeID -} - func (ms *merkleState) processCurrentStakers() ( map[ids.ID]*stakersData, map[weightDiffKey]*ValidatorWeightDiff, @@ -978,7 +972,6 @@ func (ms *merkleState) processCurrentStakers() ( outputStakers[txID] = &stakersData{ TxBytes: tx.Bytes(), - IsCurrent: false, PotentialReward: potentialReward, } outputWeights[weightKey] = &ValidatorWeightDiff{ @@ -1016,7 +1009,6 @@ func (ms *merkleState) processCurrentStakers() ( outputStakers[staker.TxID] = &stakersData{ TxBytes: tx.Bytes(), - IsCurrent: false, PotentialReward: staker.PotentialReward, } if err := outputWeights[weightKey].Add(false, staker.Weight); err != nil { @@ -1053,7 +1045,6 @@ func (ms *merkleState) processPendingStakers() (map[ids.ID]*stakersData, error) } output[txID] = &stakersData{ TxBytes: tx.Bytes(), - IsCurrent: false, PotentialReward: 0, } case deleted: @@ -1073,7 +1064,6 @@ func (ms *merkleState) processPendingStakers() (map[ids.ID]*stakersData, error) } output[staker.TxID] = &stakersData{ TxBytes: tx.Bytes(), - IsCurrent: false, PotentialReward: 0, } } diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 354a9d1672de..708d50b932cb 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ 
b/vms/platformvm/state/merkle_state_helpers.go @@ -4,17 +4,50 @@ package state import ( + "time" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" ) +// helpers types to store data on merkleDB +type uptimes struct { + Duration time.Duration `serialize:"true"` + LastUpdated uint64 `serialize:"true"` // Unix time in seconds + + // txID ids.ID // TODO ABENEGIA: is it needed by delegators and not validators? + lastUpdated time.Time +} + +type stakersData struct { + TxBytes []byte `serialize:"true"` + PotentialReward uint64 `serialize:"true"` +} + +type weightDiffKey struct { + subnetID ids.ID + nodeID ids.NodeID +} + +func merkleSuppliesKeyPrefix() []byte { + prefix := make([]byte, 0, len(merkleSuppliesPrefix)) + copy(prefix, merkleSuppliesPrefix) + return prefix +} + func merkleSuppliesKey(subnetID ids.ID) []byte { - key := make([]byte, 0, len(merkleSuppliesPrefix)+len(subnetID[:])) - copy(key, merkleSuppliesPrefix) + key := merkleSuppliesKeyPrefix() key = append(key, subnetID[:]...) return key } +func splitMerkleSuppliesKey(b []byte) ([]byte, ids.ID) { + prefix := b[:len(merkleSuppliesPrefix)] + subnetID := ids.Empty + copy(subnetID[:], b[len(merkleSuppliesPrefix):]) + return prefix, subnetID +} + func merklePermissionedSubnetKey(subnetID ids.ID) []byte { key := make([]byte, 0, len(permissionedSubnetSectionPrefix)+len(subnetID[:])) copy(key, permissionedSubnetSectionPrefix) key = append(key, subnetID[:]...) @@ -112,7 +145,7 @@ func merkleDelegateeRewardsKey(nodeID ids.NodeID, subnetID ids.ID) []byte { } // TODO: remove when ValidatorDiff optimization is merged in -func splitMerkleWeightDiffKey(key []byte) (ids.ID, ids.NodeID, uint64) { +func splitMerkleWeightDiffKey(key []byte) (ids.ID, ids.NodeID, uint64, error) { subnetIDLength := 32 nodeIDLength := 20 @@ -123,11 +156,7 @@ func splitMerkleWeightDiffKey(key []byte) (ids.ID, ids.NodeID, uint64) { copy(nodeID[:], key[subnetIDLength:subnetIDLength+nodeIDLength]) height, err := database.ParseUInt64(key[subnetIDLength+nodeIDLength:]) - if err != nil { - panic("failed splitting MerkleWeightDiffKey") - } - - return subnetID, nodeID, height + return subnetID, nodeID, height, err } func merkleBlsKeyDiffKey(nodeID ids.NodeID, height uint64) []byte { @@ -138,16 +167,12 @@ func merkleBlsKeyDiffKey(nodeID ids.NodeID, height uint64) []byte { } // TODO: remove when ValidatorDiff optimization is merged in -func splitMerkleBlsKeyDiffKey(key []byte) (ids.NodeID, uint64) { +func splitMerkleBlsKeyDiffKey(key []byte) (ids.NodeID, uint64, error) { nodeIDLength := 20 nodeID := ids.EmptyNodeID copy(nodeID[:], key[0:nodeIDLength]) height, err := database.ParseUInt64(key[nodeIDLength:]) - if err != nil { - panic("failed splitting MerkleBlsKeyDiffKey") - } - - return nodeID, height + return nodeID, height, err } diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go index a4be8031a7e4..0554286f7a16 100644 --- a/vms/platformvm/state/merkle_state_load_ops.go +++ b/vms/platformvm/state/merkle_state_load_ops.go @@ -7,9 +7,15 @@ import ( "fmt" "time" + "github.com/google/btree" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" 
"github.com/ava-labs/avalanchego/vms/platformvm/genesis" @@ -17,6 +23,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) +// var errNotYetImplemented = errors.New("NOT YET IMPLEMENTED") + func (ms *merkleState) sync(genesis []byte) error { shouldInit, err := ms.shouldInit() if err != nil { @@ -37,7 +45,7 @@ func (ms *merkleState) sync(genesis []byte) error { } } - return nil /*ms.load()*/ + return ms.load() } func (ms *merkleState) shouldInit() (bool, error) { @@ -141,3 +149,181 @@ func (ms *merkleState) syncGenesis(genesisBlk blocks.Block, genesis *genesis.Sta // initialized. return ms.write(false /*=updateValidators*/, 0) } + +// Load pulls data previously stored on disk that is expected to be in memory. +func (ms *merkleState) load() error { + errs := wrappers.Errs{} + errs.Add( + // ms.loadMetadata(), + // ms.loadCurrentValidators(), + // ms.loadPendingValidators(), + ms.loadMerkleMetadata(), + ms.loadCurrentStakers(), + ms.loadPendingStakers(), + ms.initValidatorSets(), + ) + return errs.Err +} + +func (ms *merkleState) loadMerkleMetadata() error { + // load chainTime + chainTimeBytes, err := ms.merkleDB.Get(merkleChainTimeKey) + if err != nil { + return err + } + chainTime := time.Time{} + if err := chainTime.UnmarshalBinary(chainTimeBytes); err != nil { + return err + } + ms.SetTimestamp(chainTime) + + // load last accepted block + blkIDBytes, err := ms.merkleDB.Get(merkleLastAcceptedBlkIDKey) + if err != nil { + return err + } + lastAcceptedBlkID := ids.Empty + copy(lastAcceptedBlkID[:], blkIDBytes) + ms.SetLastAccepted(lastAcceptedBlkID) + + // load supplies + suppliedPrefix := merkleSuppliesKeyPrefix() + iter := ms.merkleDB.NewIteratorWithPrefix(suppliedPrefix) + defer iter.Release() + for iter.Next() { + _, subnetID := splitMerkleSuppliesKey(iter.Key()) + supply, err := database.ParseUInt64(iter.Value()) + if err != nil { + return err + } + ms.supplies[subnetID] = supply + } + return iter.Error() +} + +func (ms *merkleState) loadCurrentStakers() error { + // TODO ABENEGIA: Check missing metadata + ms.currentStakers = newBaseStakers() + + prefix := make([]byte, len(currentStakersSectionPrefix)) + copy(prefix, currentStakersSectionPrefix) + + iter := ms.merkleDB.NewIteratorWithPrefix(prefix) + defer iter.Release() + for iter.Next() { + data := &stakersData{} + if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil { + return fmt.Errorf("failed to deserialize current stakers data: %w", err) + } + + tx, err := txs.Parse(txs.GenesisCodec, data.TxBytes) + if err != nil { + return fmt.Errorf("failed to parsing current stakerTx: %w", err) + } + stakerTx, ok := tx.Unsigned.(txs.Staker) + if !ok { + return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) + } + + staker, err := NewCurrentStaker(tx.ID(), stakerTx, data.PotentialReward) + if err != nil { + return err + } + if staker.Priority.IsValidator() { + validator := ms.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) + validator.validator = staker + ms.currentStakers.stakers.ReplaceOrInsert(staker) + } else { + validator := ms.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) + if validator.delegators == nil { + validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) + } + validator.delegators.ReplaceOrInsert(staker) + ms.currentStakers.stakers.ReplaceOrInsert(staker) + } + } + return iter.Error() +} + +func (ms *merkleState) loadPendingStakers() error { + // TODO ABENEGIA: Check missing metadata + ms.pendingStakers = newBaseStakers() 
+ + prefix := make([]byte, len(pendingStakersSectionPrefix)) + copy(prefix, pendingStakersSectionPrefix) + + iter := ms.merkleDB.NewIteratorWithPrefix(prefix) + defer iter.Release() + for iter.Next() { + data := &stakersData{} + if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil { + return fmt.Errorf("failed to deserialize pending stakers data: %w", err) + } + + tx, err := txs.Parse(txs.GenesisCodec, data.TxBytes) + if err != nil { + return fmt.Errorf("failed to parse pending stakerTx: %w", err) + } + stakerTx, ok := tx.Unsigned.(txs.Staker) + if !ok { + return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) + } + + staker, err := NewPendingStaker(tx.ID(), stakerTx) + if err != nil { + return err + } + if staker.Priority.IsValidator() { + validator := ms.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) + validator.validator = staker + ms.pendingStakers.stakers.ReplaceOrInsert(staker) + } else { + validator := ms.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) + if validator.delegators == nil { + validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) + } + validator.delegators.ReplaceOrInsert(staker) + ms.pendingStakers.stakers.ReplaceOrInsert(staker) + } + } + return iter.Error() +} + +// Invariant: initValidatorSets requires loadCurrentStakers to have already +// been called. +func (ms *merkleState) initValidatorSets() error { + primaryValidators, ok := ms.cfg.Validators.Get(constants.PrimaryNetworkID) + if !ok { + return errMissingValidatorSet + } + if primaryValidators.Len() != 0 { + // Enforce the invariant that the validator set is empty here. + return errValidatorSetAlreadyPopulated + } + err := ms.ValidatorSet(constants.PrimaryNetworkID, primaryValidators) + if err != nil { + return err + } + + vl := validators.NewLogger(ms.ctx.Log, ms.bootstrapped, constants.PrimaryNetworkID, ms.ctx.NodeID) + primaryValidators.RegisterCallbackListener(vl) + + ms.metrics.SetLocalStake(primaryValidators.GetWeight(ms.ctx.NodeID)) + ms.metrics.SetTotalStake(primaryValidators.Weight()) + + for subnetID := range ms.cfg.TrackedSubnets { + subnetValidators := validators.NewSet() + err := ms.ValidatorSet(subnetID, subnetValidators) + if err != nil { + return err + } + + if !ms.cfg.Validators.Add(subnetID, subnetValidators) { + return fmt.Errorf("%w: %s", errDuplicateValidatorSet, subnetID) + } + + vl := validators.NewLogger(ms.ctx.Log, ms.bootstrapped, subnetID, ms.ctx.NodeID) + subnetValidators.RegisterCallbackListener(vl) + } + return nil +} From df5bbd15a0c3507cf28b33e7cc22e7c70bd79a77 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 22 Jul 2023 11:00:17 +0200 Subject: [PATCH 060/132] wip: hooked up merkleDB. 
Fixing bugs --- vms/platformvm/state/merkle_state.go | 6 +- vms/platformvm/state/merkle_state_helpers.go | 44 +++-- vms/platformvm/state/merkle_state_test.go | 188 +++++++++++++++++++ vms/platformvm/vm.go | 8 +- 4 files changed, 216 insertions(+), 30 deletions(-) create mode 100644 vms/platformvm/state/merkle_state_test.go diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index b73ad5de2690..53ab8b57d5bc 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -78,7 +78,7 @@ func NewMerkleState( metricsReg prometheus.Registerer, rewards reward.Calculator, bootstrapped *utils.Atomic[bool], -) (Chain, error) { +) (State, error) { var ( baseDB = versiondb.New(rawDB) baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) @@ -272,7 +272,7 @@ type merkleState struct { // Metadata section chainTime time.Time lastAcceptedBlkID ids.ID - lastAcceptedHeight uint64 // Should this be written to state?? + lastAcceptedHeight uint64 // TODO: Should this be written to state?? supplies map[ids.ID]uint64 // map of subnetID -> current supply suppliesCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database @@ -1141,7 +1141,7 @@ func (ms *merkleState) writePermissionedSubnets(view merkledb.TrieView, ctx cont return fmt.Errorf("failed to write subnetTx: %w", err) } } - ms.addedPermissionedSubnets = nil + ms.addedPermissionedSubnets = make([]*txs.Tx, 0) return nil } diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 708d50b932cb..d762c2e2ce83 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -30,7 +30,7 @@ type weightDiffKey struct { } func merkleSuppliesKeyPrefix() []byte { - prefix := make([]byte, 0, len(merkleSuppliesPrefix)) + prefix := make([]byte, len(merkleSuppliesPrefix)) copy(prefix, merkleSuppliesPrefix) return prefix } @@ -42,94 +42,90 @@ func merkleSuppliesKey(subnetID ids.ID) []byte { } func splitMerkleSuppliesKey(b []byte) ([]byte, ids.ID) { - prefix := b[:len(merkleSuppliesPrefix)] + prefix := make([]byte, len(merkleSuppliesPrefix)) + copy(prefix, b) subnetID := ids.Empty copy(subnetID[:], b[len(merkleSuppliesPrefix):]) return prefix, subnetID } func merklePermissionedSubnetKey(subnetID ids.ID) []byte { - key := make([]byte, 0, len(permissionedSubnetSectionPrefix)+len(subnetID[:])) + key := make([]byte, len(permissionedSubnetSectionPrefix), len(permissionedSubnetSectionPrefix)+len(subnetID[:])) copy(key, permissionedSubnetSectionPrefix) key = append(key, subnetID[:]...) return key } func merkleElasticSubnetKey(subnetID ids.ID) []byte { - key := make([]byte, 0, len(elasticSubnetSectionPrefix)+len(subnetID[:])) + key := make([]byte, len(elasticSubnetSectionPrefix), len(elasticSubnetSectionPrefix)+len(subnetID[:])) copy(key, elasticSubnetSectionPrefix) key = append(key, subnetID[:]...) return key } func merkleChainPrefix(subnetID ids.ID) []byte { - prefix := make([]byte, 0, len(chainsSectionPrefix)+len(subnetID[:])) + prefix := make([]byte, len(chainsSectionPrefix), len(chainsSectionPrefix)+len(subnetID[:])) copy(prefix, chainsSectionPrefix) prefix = append(prefix, subnetID[:]...) return prefix } func merkleChainKey(subnetID ids.ID, chainID ids.ID) []byte { - prefix := merkleChainPrefix(subnetID) - - key := make([]byte, 0, len(prefix)+len(chainID)) - copy(key, prefix) + key := merkleChainPrefix(subnetID) key = append(key, chainID[:]...) 
return key } func merkleUtxoIDKey(utxoID ids.ID) []byte { - key := make([]byte, 0, len(utxosSectionPrefix)+len(utxoID)) + key := make([]byte, len(utxosSectionPrefix), len(utxosSectionPrefix)+len(utxoID)) copy(key, utxosSectionPrefix) key = append(key, utxoID[:]...) return key } func merkleRewardUtxosIDPrefix(txID ids.ID) []byte { - prefix := make([]byte, 0, len(rewardUtxosSectionPrefix)+len(txID)) + prefix := make([]byte, len(rewardUtxosSectionPrefix), len(rewardUtxosSectionPrefix)+len(txID)) copy(prefix, rewardUtxosSectionPrefix) prefix = append(prefix, txID[:]...) return prefix } func merkleRewardUtxoIDKey(txID, utxoID ids.ID) []byte { - prefix := merkleRewardUtxosIDPrefix(txID) - key := make([]byte, 0, len(prefix)+len(utxoID)) - copy(key, prefix) + key := merkleRewardUtxosIDPrefix(txID) key = append(key, utxoID[:]...) return key } func merkleUtxoIndexKey(address []byte, utxoID ids.ID) []byte { - key := make([]byte, 0, len(address)+len(utxoID)) + key := make([]byte, len(address), len(address)+len(utxoID)) copy(key, address) key = append(key, utxoID[:]...) return key } func merkleLocalUptimesKey(nodeID ids.NodeID, subnetID ids.ID) []byte { - key := make([]byte, 0, len(nodeID)+len(subnetID)) + key := make([]byte, len(nodeID), len(nodeID)+len(subnetID)) copy(key, nodeID[:]) key = append(key, subnetID[:]...) return key } func merkleCurrentStakersKey(txID ids.ID) []byte { - key := make([]byte, 0, len(currentStakersSectionPrefix)+len(txID)) + key := make([]byte, len(currentStakersSectionPrefix), len(currentStakersSectionPrefix)+len(txID)) copy(key, currentStakersSectionPrefix) key = append(key, txID[:]...) return key } func merklePendingStakersKey(txID ids.ID) []byte { - key := make([]byte, 0, len(pendingStakersSectionPrefix)+len(txID)) + key := make([]byte, len(pendingStakersSectionPrefix), len(pendingStakersSectionPrefix)+len(txID)) copy(key, pendingStakersSectionPrefix) key = append(key, txID[:]...) return key } func merkleDelegateeRewardsKey(nodeID ids.NodeID, subnetID ids.ID) []byte { - key := make([]byte, 0, len(delegateeRewardsPrefix)+len(nodeID)+len(subnetID)) + key := make([]byte, len(delegateeRewardsPrefix), len(delegateeRewardsPrefix)+len(nodeID)+len(subnetID)) copy(key, delegateeRewardsPrefix) key = append(key, nodeID[:]...) key = append(key, subnetID[:]...) @@ -137,10 +133,11 @@ func merkleDelegateeRewardsKey(nodeID ids.NodeID, subnetID ids.ID) []byte { } func merkleWeightDiffKey(subnetID ids.ID, nodeID ids.NodeID, height uint64) []byte { - key := make([]byte, 0, len(nodeID)+len(subnetID)) // missing height part + packedHeight := database.PackUInt64(height) + key := make([]byte, 0, len(nodeID)+len(subnetID)+len(packedHeight)) key = append(key, subnetID[:]...) key = append(key, nodeID[:]...) - key = append(key, database.PackUInt64(height)...) + key = append(key, packedHeight...) return key } @@ -160,9 +157,10 @@ func splitMerkleWeightDiffKey(key []byte) (ids.ID, ids.NodeID, uint64, error) { } func merkleBlsKeytDiffKey(nodeID ids.NodeID, height uint64) []byte { - key := make([]byte, 0, len(nodeID)) // missing height part + packedHeight := database.PackUInt64(height) + key := make([]byte, 0, len(nodeID)+len(packedHeight)) key = append(key, nodeID[:]...) - key = append(key, database.PackUInt64(height)...) + key = append(key, packedHeight...) 
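+ // key layout: nodeID | packed height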
return key } diff --git a/vms/platformvm/state/merkle_state_test.go b/vms/platformvm/state/merkle_state_test.go new file mode 100644 index 000000000000..d8bdae2a7255 --- /dev/null +++ b/vms/platformvm/state/merkle_state_test.go @@ -0,0 +1,188 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +func TestSuppliesKeyTest(t *testing.T) { + require := require.New(t) + subnetID := ids.GenerateTestID() + + key := merkleSuppliesKey(subnetID) + prefix, retrievedSubnetID := splitMerkleSuppliesKey(key) + + require.Equal(merkleSuppliesPrefix, prefix) + require.Equal(subnetID, retrievedSubnetID) +} + +func TestPermissionedSubnetKey(t *testing.T) { + require := require.New(t) + subnetID := ids.GenerateTestID() + prefix := permissionedSubnetSectionPrefix + + key := merklePermissionedSubnetKey(subnetID) + + require.Len(key, len(prefix)+len(subnetID[:])) + require.Equal(prefix, key[0:len(prefix)]) + require.Equal(subnetID[:], key[len(prefix):]) +} + +func TestElasticSubnetKey(t *testing.T) { + require := require.New(t) + subnetID := ids.GenerateTestID() + prefix := elasticSubnetSectionPrefix + + key := merkleElasticSubnetKey(subnetID) + + require.Len(key, len(prefix)+len(subnetID[:])) + require.Equal(prefix, key[0:len(prefix)]) + require.Equal(subnetID[:], key[len(prefix):]) +} + +func TestChainKey(t *testing.T) { + require := require.New(t) + subnetID := ids.GenerateTestID() + chainID := ids.GenerateTestID() + prefix := chainsSectionPrefix + + keyPrefix := merkleChainPrefix(subnetID) + key := merkleChainKey(subnetID, chainID) + + require.Len(keyPrefix, len(prefix)+len(subnetID[:])) + require.Equal(prefix, key[0:len(prefix)]) + require.Equal(subnetID[:], keyPrefix[len(prefix):]) + + require.Len(key, len(keyPrefix)+len(chainID[:])) + require.Equal(chainID[:], key[len(keyPrefix):]) +} + +func TestUtxoIDKey(t *testing.T) { + require := require.New(t) + utxoID := ids.GenerateTestID() + prefix := utxosSectionPrefix + + key := merkleUtxoIDKey(utxoID) + + require.Len(key, len(prefix)+len(utxoID[:])) + require.Equal(prefix, key[0:len(prefix)]) + require.Equal(utxoID[:], key[len(prefix):]) +} + +func TestRewardUtxoKey(t *testing.T) { + require := require.New(t) + txID := ids.GenerateTestID() + utxoID := ids.GenerateTestID() + prefix := rewardUtxosSectionPrefix + + keyPrefix := merkleRewardUtxosIDPrefix(txID) + key := merkleRewardUtxoIDKey(txID, utxoID) + + require.Len(keyPrefix, len(prefix)+len(txID[:])) + require.Equal(prefix, key[0:len(prefix)]) + require.Equal(txID[:], keyPrefix[len(prefix):]) + + require.Len(key, len(keyPrefix)+len(utxoID[:])) + require.Equal(utxoID[:], key[len(keyPrefix):]) +} + +func TestUtxosIndexKey(t *testing.T) { + require := require.New(t) + utxoID := ids.GenerateTestID() + + keys := secp256k1.TestKeys() + address := keys[1].PublicKey().Address().Bytes() + key := merkleUtxoIndexKey(address, utxoID) + + require.Len(key, len(address[:])+len(utxoID[:])) + require.Equal(address[:], key[0:len(address[:])]) + require.Equal(utxoID[:], key[len(address[:]):]) +} + +func TestLocalUptimesKey(t *testing.T) { + require := require.New(t) + nodeID := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() + + key := merkleLocalUptimesKey(nodeID, subnetID) + + require.Len(key, len(nodeID[:])+len(subnetID[:])) + require.Equal(nodeID[:], 
key[0:len(nodeID[:])]) + require.Equal(subnetID[:], key[len(nodeID[:]):]) +} + +func TestCurrentStakersKey(t *testing.T) { + require := require.New(t) + stakerID := ids.GenerateTestID() + prefix := currentStakersSectionPrefix + + key := merkleCurrentStakersKey(stakerID) + + require.Len(key, len(prefix)+len(stakerID[:])) + require.Equal(prefix, key[0:len(prefix)]) + require.Equal(stakerID[:], key[len(prefix):]) +} + +func TestPendingStakersKey(t *testing.T) { + require := require.New(t) + stakerID := ids.GenerateTestID() + prefix := pendingStakersSectionPrefix + + key := merklePendingStakersKey(stakerID) + + require.Len(key, len(prefix)+len(stakerID[:])) + require.Equal(prefix, key[0:len(prefix)]) + require.Equal(stakerID[:], key[len(prefix):]) +} + +func TestDelegateeRewardsKey(t *testing.T) { + require := require.New(t) + prefix := delegateeRewardsPrefix + nodeID := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() + + key := merkleDelegateeRewardsKey(nodeID, subnetID) + + require.Len(key, len(prefix)+len(nodeID[:])+len(subnetID[:])) + require.Equal(prefix, key[0:len(prefix[:])]) + require.Equal(nodeID[:], key[len(prefix[:]):len(prefix[:])+len(nodeID[:])]) + require.Equal(subnetID[:], key[len(prefix[:])+len(nodeID[:]):]) +} + +func TestWeightDiffKey(t *testing.T) { + require := require.New(t) + + subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + height := rand.Uint64() + + key := merkleWeightDiffKey(subnetID, nodeID, height) + rSubnetID, rNodeID, rHeight, err := splitMerkleWeightDiffKey(key) + + require.NoError(err) + require.Equal(subnetID, rSubnetID) + require.Equal(nodeID, rNodeID) + require.Equal(height, rHeight) +} + +func TestBlsKeyDiffKey(t *testing.T) { + require := require.New(t) + + nodeID := ids.GenerateTestNodeID() + height := rand.Uint64() + + key := merkleBlsKeytDiffKey(nodeID, height) + rNodeID, rHeight, err := splitMerkleBlsKeyDiffKey(key) + + require.NoError(err) + require.Equal(nodeID, rNodeID) + require.Equal(height, rHeight) +} diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 0f7a264d1b6a..cef998042da1 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -143,16 +143,16 @@ func (vm *VM) Initialize( } rewards := reward.NewCalculator(vm.RewardConfig) - vm.state, err = state.New( + vm.state, err = state.NewMerkleState( vm.dbManager.Current().Database, + vm.metrics, genesisBytes, - registerer, &vm.Config, vm.ctx, - vm.metrics, + registerer, rewards, &vm.bootstrapped, - platformConfig.ChecksumsEnabled, + // platformConfig.ChecksumsEnabled, ) if err != nil { return err From 554d42f601077555e9989c378999ed7b55c8d31c Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 22 Jul 2023 12:46:12 +0200 Subject: [PATCH 061/132] wip: some more bug fixing --- vms/platformvm/state/merkle_state.go | 56 ++++++++++++++----- vms/platformvm/state/merkle_state_helpers.go | 8 +++ vms/platformvm/state/merkle_state_load_ops.go | 1 + vms/platformvm/state/merkle_state_test.go | 18 +++--- 4 files changed, 61 insertions(+), 22 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 53ab8b57d5bc..b7bb3fdb0069 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -4,6 +4,7 @@ package state import ( + "bytes" "context" "fmt" "time" @@ -318,6 +319,13 @@ func (ms *merkleState) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) ( func (ms *merkleState) PutCurrentValidator(staker *Staker) { ms.currentStakers.PutValidator(staker) + + // make sure 
that each new validator has an uptime entry + // merkleState implementation of SetUptime must not err + err := ms.SetUptime(staker.NodeID, staker.SubnetID, 0 /*duration*/, staker.StartTime) + if err != nil { + panic(err) + } } func (ms *merkleState) DeleteCurrentValidator(staker *Staker) { @@ -441,9 +449,9 @@ func (ms *merkleState) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, utxoIDs := []ids.ID(nil) for len(utxoIDs) < limit && iter.Next() { - utxoID, err := ids.ToID(iter.Key()) - if err != nil { - return nil, err + itAddr, utxoID := splitUtxoIndexKey(iter.Key()) + if !bytes.Equal(itAddr, addr) { + break } if utxoID == start { continue @@ -750,26 +758,47 @@ func (ms *merkleState) AddStatelessBlock(block blocks.Block) { // UPTIMES SECTION func (ms *merkleState) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) { nodeUptimes, exists := ms.localUptimesCache[vdrID] - if !exists { - return 0, time.Time{}, database.ErrNotFound + if exists { + uptime, exists := nodeUptimes[subnetID] + if exists { + return uptime.Duration, uptime.lastUpdated, nil + } } - uptime, exists := nodeUptimes[subnetID] - if !exists { + + // try loading from DB + key := merkleLocalUptimesKey(vdrID, subnetID) + uptimeBytes, err := ms.localUptimesDB.Get(key) + switch err { + case nil: + upTm := &uptimes{} + if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, upTm); err != nil { + return 0, time.Time{}, err + } + upTm.lastUpdated = time.Unix(int64(upTm.LastUpdated), 0) + ms.localUptimesCache[vdrID] = make(map[ids.ID]*uptimes) + ms.localUptimesCache[vdrID][subnetID] = upTm + return upTm.Duration, upTm.lastUpdated, nil + + case database.ErrNotFound: + // no local data for this staker uptime return 0, time.Time{}, database.ErrNotFound + default: + return 0, time.Time{}, err } - - return uptime.Duration, uptime.lastUpdated, nil } func (ms *merkleState) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { nodeUptimes, exists := ms.localUptimesCache[vdrID] if !exists { - nodeUptimes = map[ids.ID]*uptimes{} + nodeUptimes = make(map[ids.ID]*uptimes) ms.localUptimesCache[vdrID] = nodeUptimes } - nodeUptimes[subnetID].Duration = upDuration - nodeUptimes[subnetID].lastUpdated = lastUpdated + nodeUptimes[subnetID] = &uptimes{ + Duration: upDuration, + LastUpdated: uint64(lastUpdated.Unix()), + lastUpdated: lastUpdated, + } // track diff updatedNodeUptimes, ok := ms.modifiedLocalUptimes[vdrID] @@ -782,7 +811,7 @@ func (ms *merkleState) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration t } func (ms *merkleState) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { - staker, err := ms.currentStakers.GetValidator(subnetID, nodeID) + staker, err := ms.GetCurrentValidator(subnetID, nodeID) if err != nil { return time.Time{}, err } @@ -1351,7 +1380,6 @@ func (ms *merkleState) writeLocalUptimes() error { key := merkleLocalUptimesKey(vdrID, subnetID) uptimes := ms.localUptimesCache[vdrID][subnetID] - uptimes.LastUpdated = uint64(uptimes.lastUpdated.Unix()) uptimeBytes, err := txs.GenesisCodec.Marshal(txs.Version, uptimes) if err != nil { return err diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index d762c2e2ce83..038f586e4da6 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -103,6 +103,14 @@ func merkleUtxoIndexKey(address []byte, utxoID ids.ID) []byte { return key } +func 
splitUtxoIndexKey(b []byte) ([]byte, ids.ID) { + utxoID := ids.Empty + address := make([]byte, len(b)-len(utxoID)) + copy(address, b[:len(address)]) + copy(utxoID[:], b[len(address):]) + return address, utxoID +} + func merkleLocalUptimesKey(nodeID ids.NodeID, subnetID ids.ID) []byte { key := make([]byte, len(nodeID), len(nodeID)+len(subnetID)) copy(key, nodeID[:]) diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go index 0554286f7a16..2529bedbdfe6 100644 --- a/vms/platformvm/state/merkle_state_load_ops.go +++ b/vms/platformvm/state/merkle_state_load_ops.go @@ -230,6 +230,7 @@ func (ms *merkleState) loadCurrentStakers() error { return err } if staker.Priority.IsValidator() { + // TODO: why not PutValidator/PutDelegator?? validator := ms.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) validator.validator = staker ms.currentStakers.stakers.ReplaceOrInsert(staker) diff --git a/vms/platformvm/state/merkle_state_test.go b/vms/platformvm/state/merkle_state_test.go index d8bdae2a7255..93708f16ade0 100644 --- a/vms/platformvm/state/merkle_state_test.go +++ b/vms/platformvm/state/merkle_state_test.go @@ -100,11 +100,13 @@ func TestUtxosIndexKey(t *testing.T) { keys := secp256k1.TestKeys() address := keys[1].PublicKey().Address().Bytes() + key := merkleUtxoIndexKey(address, utxoID) + rAddress, rUtxoID := splitUtxoIndexKey(key) - require.Len(key, len(address[:])+len(utxoID[:])) - require.Equal(address[:], key[0:len(address[:])]) - require.Equal(utxoID[:], key[len(address[:]):]) + require.Len(key, len(address)+len(utxoID[:])) + require.Equal(address, rAddress) + require.Equal(utxoID, rUtxoID) } func TestLocalUptimesKey(t *testing.T) { @@ -152,9 +154,9 @@ func TestDelegateeRewardsKey(t *testing.T) { key := merkleDelegateeRewardsKey(nodeID, subnetID) require.Len(key, len(prefix)+len(nodeID[:])+len(subnetID[:])) - require.Equal(prefix, key[0:len(prefix[:])]) - require.Equal(nodeID[:], key[len(prefix[:]):len(prefix[:])+len(nodeID[:])]) - require.Equal(subnetID[:], key[len(prefix[:])+len(nodeID[:]):]) + require.Equal(prefix, key[0:len(prefix)]) + require.Equal(nodeID[:], key[len(prefix):len(prefix)+len(nodeID[:])]) + require.Equal(subnetID[:], key[len(prefix)+len(nodeID[:]):]) } func TestWeightDiffKey(t *testing.T) { @@ -162,7 +164,7 @@ func TestWeightDiffKey(t *testing.T) { subnetID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() - height := rand.Uint64() + height := rand.Uint64() // #nosec G404 key := merkleWeightDiffKey(subnetID, nodeID, height) rSubnetID, rNodeID, rHeight, err := splitMerkleWeightDiffKey(key) @@ -177,7 +179,7 @@ func TestBlsKeyDiffKey(t *testing.T) { require := require.New(t) nodeID := ids.GenerateTestNodeID() - height := rand.Uint64() + height := rand.Uint64() // #nosec G404 key := merkleBlsKeytDiffKey(nodeID, height) rNodeID, rHeight, err := splitMerkleBlsKeyDiffKey(key) From d07531d8b656d776621b241da72e125401f697b9 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 22 Jul 2023 13:36:41 +0200 Subject: [PATCH 062/132] nit --- vms/platformvm/state/merkle_state.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index b7bb3fdb0069..c883a1fa3dd3 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -287,6 +287,11 @@ type merkleState struct { addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet chainCache 
cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx + // Blocks section + addedBlocks map[ids.ID]blocks.Block // map of blockID -> Block + blockCache cache.Cacher[ids.ID, blocks.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database + blockDB database.Database + // Txs section // FIND a way to reduce use of these. No use in verification of addedTxs // a limited windows to support APIs @@ -294,11 +299,6 @@ type merkleState struct { txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. If the entry is nil, it isn't in the database txDB database.Database - // Blocks section - addedBlocks map[ids.ID]blocks.Block // map of blockID -> Block - blockCache cache.Cacher[ids.ID, blocks.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database - blockDB database.Database - indexedUTXOsDB database.Database localUptimesCache map[ids.NodeID]map[ids.ID]*uptimes // vdrID -> subnetID -> metadata @@ -931,6 +931,8 @@ func (*merkleState) Checksum() ids.ID { func (ms *merkleState) Close() error { errs := wrappers.Errs{} errs.Add( + ms.localBlsKeyDiffDB.Close(), + ms.localWeightDiffDB.Close(), ms.localUptimesDB.Close(), ms.indexedUTXOsDB.Close(), ms.txDB.Close(), From b43061c30aba79001f99d1e782ccb5590b757a10 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 22 Jul 2023 15:31:49 +0200 Subject: [PATCH 063/132] wip: some more fixes --- vms/platformvm/state/merkle_state.go | 18 +++++++++++------- vms/platformvm/state/merkle_state_helpers.go | 9 +++++++-- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index c883a1fa3dd3..fb28e6802c0e 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -206,7 +206,7 @@ func NewMerkleState( suppliesCache: suppliesCache, addedPermissionedSubnets: make([]*txs.Tx, 0), - permissionedSubnetCache: make([]*txs.Tx, 0), + permissionedSubnetCache: nil, // created first time GetSubnets is called addedElasticSubnets: make(map[ids.ID]*txs.Tx), elasticSubnetCache: transformedSubnetCache, @@ -442,9 +442,12 @@ func (ms *merkleState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { } func (ms *merkleState) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { - key := merkleUtxoIndexKey(addr, start) + var ( + prefix = merkleUtxoIndexPrefix(addr) + key = merkleUtxoIndexKey(addr, start) + ) - iter := ms.indexedUTXOsDB.NewIteratorWithStart(key) + iter := ms.indexedUTXOsDB.NewIteratorWithStartAndPrefix(key, prefix) defer iter.Release() utxoIDs := []ids.ID(nil) @@ -1235,25 +1238,26 @@ func (ms *merkleState) writeUTXOs(view merkledb.TrieView, ctx context.Context) e delete(ms.modifiedUTXOs, utxoID) key := merkleUtxoIDKey(utxoID) if utxo == nil { // delete the UTXO - switch _, err := ms.GetUTXO(utxoID); err { + switch utxo, err := ms.GetUTXO(utxoID); err { case nil: ms.utxoCache.Put(utxoID, nil) if err := view.Remove(ctx, key); err != nil { return err } - // store the index if err := ms.writeUTXOsIndex(utxo, false /*insertUtxo*/); err != nil { return err } + // go process next utxo + continue case database.ErrNotFound: - return nil + // trying to delete a non-existing utxo. 
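+ // nothing to remove from the trie or the address index in this case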
+ continue default: return err } - continue } // insert the UTXO diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 038f586e4da6..9b7b259a6544 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -96,9 +96,14 @@ func merkleRewardUtxoIDKey(txID, utxoID ids.ID) []byte { return key } +func merkleUtxoIndexPrefix(address []byte) []byte { + prefix := make([]byte, len(address)) + copy(prefix, address) + return prefix +} + func merkleUtxoIndexKey(address []byte, utxoID ids.ID) []byte { - key := make([]byte, len(address), len(address)+len(utxoID)) - copy(key, address) + key := merkleUtxoIndexPrefix(address) key = append(key, utxoID[:]...) return key } From 46d995236901be4c07add83f0e16bc7b92be79ac Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 22 Jul 2023 16:08:41 +0200 Subject: [PATCH 064/132] wip: some more fixes --- vms/platformvm/state/merkle_state.go | 42 +++++++++++++++++++--------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index fb28e6802c0e..a6420cb1b446 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -378,13 +378,28 @@ func (ms *merkleState) GetPendingStakerIterator() (StakerIterator, error) { func (ms *merkleState) GetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID) (uint64, error) { nodeDelegateeRewards, exists := ms.delegateeRewardCache[vdrID] - if !exists { - return 0, database.ErrNotFound + if exists { + delegateeReward, exists := nodeDelegateeRewards[subnetID] + if exists { + return delegateeReward, nil + } } - delegateeReward, exists := nodeDelegateeRewards[subnetID] - if !exists { - return 0, database.ErrNotFound + + // try loading from the db + key := merkleDelegateeRewardsKey(vdrID, subnetID) + amountBytes, err := ms.merkleDB.Get(key) + if err != nil { + return 0, err } + delegateeReward, err := database.ParseUInt64(amountBytes) + if err != nil { + return 0, err + } + + if _, found := ms.delegateeRewardCache[vdrID]; !found { + ms.delegateeRewardCache[vdrID] = make(map[ids.ID]uint64) + } + ms.delegateeRewardCache[vdrID][subnetID] = delegateeReward return delegateeReward, nil } @@ -992,6 +1007,12 @@ func (ms *merkleState) processCurrentStakers() ( } outputValSet[weightKey] = validatorDiff + // make sure there is an entry for delegators even in case + // there are no validators modified. 
+ outputWeights[weightKey] = &ValidatorWeightDiff{ + Decrease: validatorDiff.validatorStatus == deleted, + } + switch validatorDiff.validatorStatus { case added: var ( @@ -1008,10 +1029,7 @@ func (ms *merkleState) processCurrentStakers() ( TxBytes: tx.Bytes(), PotentialReward: potentialReward, } - outputWeights[weightKey] = &ValidatorWeightDiff{ - Decrease: false, - Amount: weight, - } + outputWeights[weightKey].Amount = weight case deleted: var ( @@ -1023,10 +1041,8 @@ func (ms *merkleState) processCurrentStakers() ( outputStakers[txID] = &stakersData{ TxBytes: nil, } - outputWeights[weightKey] = &ValidatorWeightDiff{ - Decrease: true, - Amount: weight, - } + outputWeights[weightKey].Amount = weight + if blkKey != nil { outputBlsKey[nodeID] = blkKey } From 669ad939aaab036a031ba22b16b71b72bda5bcf9 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 22 Jul 2023 19:00:23 +0200 Subject: [PATCH 065/132] wip: some more fixes --- vms/platformvm/state/merkle_state.go | 22 ++++++++++++++++---- vms/platformvm/state/merkle_state_helpers.go | 2 +- vms/platformvm/vm_regression_test.go | 16 ++++++-------- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index a6420cb1b446..847918a6739b 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1223,12 +1223,19 @@ func (*merkleState) writeCurrentStakers(view merkledb.TrieView, ctx context.Cont for stakerTxID, data := range currentData { key := merkleCurrentStakersKey(stakerTxID) + if data.TxBytes == nil { + if err := view.Remove(ctx, key); err != nil { + return fmt.Errorf("failed to remove current stakers data, stakerTxID %v: %w", stakerTxID, err) + } + continue + } + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) if err != nil { - return fmt.Errorf("failed to serialize current stakers data, stakerTxID%v : %w", stakerTxID, err) + return fmt.Errorf("failed to serialize current stakers data, stakerTxID %v: %w", stakerTxID, err) } if err := view.Insert(ctx, key, dataBytes); err != nil { - return fmt.Errorf("failed to write current stakers data, stakerTxID%v : %w", stakerTxID, err) + return fmt.Errorf("failed to write current stakers data, stakerTxID %v: %w", stakerTxID, err) } } return nil @@ -1238,12 +1245,19 @@ func (*merkleState) writePendingStakers(view merkledb.TrieView, ctx context.Cont for stakerTxID, data := range pendingData { key := merklePendingStakersKey(stakerTxID) + if data.TxBytes == nil { + if err := view.Remove(ctx, key); err != nil { + return fmt.Errorf("failed to write pending stakers data, stakerTxID %v: %w", stakerTxID, err) + } + continue + } + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) if err != nil { - return fmt.Errorf("failed to serialize pending stakers data, stakerTxID%v : %w", stakerTxID, err) + return fmt.Errorf("failed to serialize pending stakers data, stakerTxID %v: %w", stakerTxID, err) } if err := view.Insert(ctx, key, dataBytes); err != nil { - return fmt.Errorf("failed to write pending stakers data, stakerTxID%v : %w", stakerTxID, err) + return fmt.Errorf("failed to write pending stakers data, stakerTxID %v: %w", stakerTxID, err) } } return nil diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 9b7b259a6544..359bc17cb0cc 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -20,7 +20,7 @@ type uptimes struct { } type stakersData struct 
{ - TxBytes []byte `serialize:"true"` + TxBytes []byte `serialize:"true"` // nil signals removal PotentialReward uint64 `serialize:"true"` } diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 1969478c61e2..f451dd44cf01 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -44,8 +44,6 @@ import ( blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" ) -const trackChecksum = false - func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t) @@ -655,16 +653,15 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Force a reload of the state from the database. vm.Config.Validators = validators.NewManager() vm.Config.Validators.Add(constants.PrimaryNetworkID, validators.NewSet()) - is, err := state.New( + is, err := state.NewMerkleState( vm.dbManager.Current().Database, + metrics.Noop, nil, - prometheus.NewRegistry(), &vm.Config, vm.ctx, - metrics.Noop, + prometheus.NewRegistry(), reward.NewCalculator(vm.Config.RewardConfig), &utils.Atomic[bool]{}, - trackChecksum, ) require.NoError(err) vm.state = is @@ -965,16 +962,15 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Force a reload of the state from the database. vm.Config.Validators = validators.NewManager() vm.Config.Validators.Add(constants.PrimaryNetworkID, validators.NewSet()) - is, err := state.New( + is, err := state.NewMerkleState( vm.dbManager.Current().Database, + metrics.Noop, nil, - prometheus.NewRegistry(), &vm.Config, vm.ctx, - metrics.Noop, + prometheus.NewRegistry(), reward.NewCalculator(vm.Config.RewardConfig), &utils.Atomic[bool]{}, - trackChecksum, ) require.NoError(err) vm.state = is From cc22caad118841f146f4231e0dba5a26ebdccad4 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 22 Jul 2023 19:20:23 +0200 Subject: [PATCH 066/132] wip: some more fixes --- vms/platformvm/state/merkle_state.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 847918a6739b..444a5edf8950 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -321,11 +321,16 @@ func (ms *merkleState) PutCurrentValidator(staker *Staker) { ms.currentStakers.PutValidator(staker) // make sure that each new validator has an uptime entry - // merkleState implementation of SetUptime must not err + // and a delegatee reward entry.
MerkleState implementations + // of SetUptime and SetDelegateeReward must not err err := ms.SetUptime(staker.NodeID, staker.SubnetID, 0 /*duration*/, staker.StartTime) if err != nil { panic(err) } + err = ms.SetDelegateeReward(staker.SubnetID, staker.NodeID, 0) + if err != nil { + panic(err) + } } func (ms *merkleState) DeleteCurrentValidator(staker *Staker) { From c2118a30120f67e6b7eee8b1d43a9d5455797235 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 22 Jul 2023 21:12:41 +0200 Subject: [PATCH 067/132] wip: replaced state.state with merkleState --- vms/platformvm/blocks/builder/helpers_test.go | 7 +++---- vms/platformvm/blocks/executor/helpers_test.go | 7 +++---- vms/platformvm/state/merkle_state.go | 4 ++++ vms/platformvm/state/stakers_helpers_test.go | 7 +++---- vms/platformvm/state/stakers_model_storage_test.go | 2 +- vms/platformvm/txs/executor/helpers_test.go | 7 +++---- vms/platformvm/vm_regression_test.go | 1 - 7 files changed, 17 insertions(+), 18 deletions(-) diff --git a/vms/platformvm/blocks/builder/helpers_test.go b/vms/platformvm/blocks/builder/helpers_test.go index 685247eb007e..87639e72a03b 100644 --- a/vms/platformvm/blocks/builder/helpers_test.go +++ b/vms/platformvm/blocks/builder/helpers_test.go @@ -230,16 +230,15 @@ func defaultState( require := require.New(t) genesisBytes := buildGenesisTest(t, ctx) - state, err := state.New( + state, err := state.NewMerkleState( db, + metrics.Noop, genesisBytes, - prometheus.NewRegistry(), cfg, ctx, - metrics.Noop, + prometheus.NewRegistry(), rewards, &utils.Atomic[bool]{}, - trackChecksum, ) require.NoError(err) diff --git a/vms/platformvm/blocks/executor/helpers_test.go b/vms/platformvm/blocks/executor/helpers_test.go index d6b9b0e36e04..a9d2b5572153 100644 --- a/vms/platformvm/blocks/executor/helpers_test.go +++ b/vms/platformvm/blocks/executor/helpers_test.go @@ -269,16 +269,15 @@ func defaultState( rewards reward.Calculator, ) state.State { genesisBytes := buildGenesisTest(ctx) - state, err := state.New( + state, err := state.NewMerkleState( db, + metrics.Noop, genesisBytes, - prometheus.NewRegistry(), cfg, ctx, - metrics.Noop, + prometheus.NewRegistry(), rewards, &utils.Atomic[bool]{}, - trackChecksum, ) if err != nil { panic(err) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 444a5edf8950..428797fe9d0a 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1498,6 +1498,10 @@ func (ms *merkleState) updateValidatorSet( if subnetID != constants.PrimaryNetworkID && !ms.cfg.TrackedSubnets.Contains(subnetID) { continue } + if weightDiff.Amount == 0 { + // No weight change to record; go to next validator. 
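+ // (zero diffs can occur since processCurrentStakers pre-creates an entry for every modified validator)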
+ continue + } if weightDiff.Decrease { err = validators.RemoveWeight(ms.cfg.Validators, subnetID, nodeID, weightDiff.Amount) diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index 0c8a1edf5546..e199f2af53e0 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -73,16 +73,15 @@ func buildChainState(baseDB database.Database, trackedSubnets []ids.ID) (State, } rewardsCalc := reward.NewCalculator(cfg.RewardConfig) - return New( + return NewMerkleState( baseDB, + metrics.Noop, genesisBytes, - prometheus.NewRegistry(), cfg, ctx, - metrics.Noop, + prometheus.NewRegistry(), rewardsCalc, &utils.Atomic[bool]{}, - trackChecksum, ) } diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index e17fbe96ac4c..edde31bd0fce 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -877,7 +877,7 @@ func checkSystemAndModelContent(cmdState commands.State, res commands.Result) bo // makes sure they are coherent. func checkValidatorSetContent(res commands.Result) bool { sys := res.(*sysUnderTest) - valSet := sys.baseState.(*state).cfg.Validators + valSet := sys.baseState.(*merkleState).cfg.Validators sysIt, err := sys.baseState.GetCurrentStakerIterator() if err != nil { diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index 732dce70f98e..c46af74d5f7b 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -221,16 +221,15 @@ func defaultState( rewards reward.Calculator, ) state.State { genesisBytes := buildGenesisTest(ctx) - state, err := state.New( + state, err := state.NewMerkleState( db, + metrics.Noop, genesisBytes, - prometheus.NewRegistry(), cfg, ctx, - metrics.Noop, + prometheus.NewRegistry(), rewards, &utils.Atomic[bool]{}, - trackChecksum, ) if err != nil { panic(err) diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index f451dd44cf01..3e29e82c57a9 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -1457,7 +1457,6 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() }() subnetID := testSubnet1.TxID From b24702eb47562ff74473f3d64539d51e24374b6f Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sun, 23 Jul 2023 13:23:57 +0200 Subject: [PATCH 068/132] some more fixes --- vms/platformvm/state/merkle_state.go | 21 +++++++++----------- vms/platformvm/state/merkle_state_helpers.go | 16 ++++++++------- vms/platformvm/state/merkle_state_test.go | 3 +-- vms/platformvm/vm_regression_test.go | 15 +++++++++++--- 4 files changed, 31 insertions(+), 24 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 428797fe9d0a..78d56b89317f 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -906,20 +906,17 @@ func (ms *merkleState) GetValidatorPublicKeyDiffs(height uint64) (map[ids.NodeID iter := ms.localBlsKeyDiffDB.NewIterator() defer iter.Release() for iter.Next() { - nodeID, retrievedHeight, err := splitMerkleBlsKeyDiffKey(iter.Key()) - switch { - case err != nil: - return nil, err - case retrievedHeight != height: + nodeID, retrievedHeight := 
splitMerkleBlsKeyDiffKey(iter.Key()) + if retrievedHeight != height { continue // loop them all, we'll worry about efficiency after correctness - default: - pkBytes := iter.Value() - val, err := bls.PublicKeyFromBytes(pkBytes) - if err != nil { - return nil, err - } - res[nodeID] = val } + + pkBytes := iter.Value() + val, err := bls.PublicKeyFromBytes(pkBytes) + if err != nil { + return nil, err + } + res[nodeID] = val } return res, iter.Error() } diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 359bc17cb0cc..994725c74629 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -4,6 +4,7 @@ package state import ( + "encoding/binary" "time" "github.com/ava-labs/avalanchego/database" @@ -170,20 +171,21 @@ func splitMerkleWeightDiffKey(key []byte) (ids.ID, ids.NodeID, uint64, error) { } func merkleBlsKeytDiffKey(nodeID ids.NodeID, height uint64) []byte { - packedHeight := database.PackUInt64(height) - key := make([]byte, 0, len(nodeID)+len(packedHeight)) - key = append(key, nodeID[:]...) - key = append(key, packedHeight...) + key := make([]byte, len(nodeID)+database.Uint64Size) + copy(key, nodeID[:]) + binary.BigEndian.PutUint64(key[len(nodeID):], ^height) return key } // TODO: remove when ValidatorDiff optimization is merged in -func splitMerkleBlsKeyDiffKey(key []byte) (ids.NodeID, uint64, error) { +func splitMerkleBlsKeyDiffKey(key []byte) (ids.NodeID, uint64) { nodeIDLenght := 20 nodeID := ids.EmptyNodeID copy(nodeID[:], key[0:nodeIDLenght]) - height, err := database.ParseUInt64(key[nodeIDLenght:]) - return nodeID, height, err + // Because we bit flip the height when constructing the key, we must + // remember to bit flip again here.
+ height := ^binary.BigEndian.Uint64(key[nodeIDLenght:]) + return nodeID, height } diff --git a/vms/platformvm/state/merkle_state_test.go b/vms/platformvm/state/merkle_state_test.go index 93708f16ade0..1390a0d8e5e5 100644 --- a/vms/platformvm/state/merkle_state_test.go +++ b/vms/platformvm/state/merkle_state_test.go @@ -182,9 +182,8 @@ func TestBlsKeyDiffKey(t *testing.T) { height := rand.Uint64() // #nosec G404 key := merkleBlsKeytDiffKey(nodeID, height) - rNodeID, rHeight, err := splitMerkleBlsKeyDiffKey(key) + rNodeID, rHeight := splitMerkleBlsKeyDiffKey(key) - require.NoError(err) require.Equal(nodeID, rNodeID) require.Equal(height, rHeight) } diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 3e29e82c57a9..31cede39fb14 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -4,6 +4,7 @@ package platformvm import ( + "bytes" "context" "errors" "testing" @@ -2130,9 +2131,17 @@ func checkValidatorBlsKeyIsSet( if !found { return database.ErrNotFound } - if val.PublicKey != expectedBlsKey { + switch { + case expectedBlsKey == nil && val.PublicKey == nil: + return nil + case expectedBlsKey != nil && val.PublicKey == nil: return errors.New("unexpected BLS key") + case expectedBlsKey == nil && val.PublicKey != nil: + return errors.New("unexpected BLS key") + default: + if !bytes.Equal(bls.PublicKeyToBytes(val.PublicKey), bls.PublicKeyToBytes(expectedBlsKey)) { + return errors.New("unexpected BLS key") + } + return nil } - - return nil } From 94e413bcdcbf22ef49673a2968e36b046da5ed4f Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sun, 23 Jul 2023 14:32:37 +0200 Subject: [PATCH 069/132] fix panic --- vms/platformvm/state/merkle_state.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 9794ee8cd7b3..f6bc4df54ba5 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1199,13 +1199,13 @@ func (ms *merkleState) writePermissionedSubnets(view merkledb.TrieView, ctx cont } func (ms *merkleState) writeElasticSubnets(view merkledb.TrieView, ctx context.Context) error { - for _, subnetTx := range ms.addedElasticSubnets { + for subnetID, subnetTx := range ms.addedElasticSubnets { key := merkleElasticSubnetKey(subnetTx.ID()) if err := view.Insert(ctx, key, subnetTx.Bytes()); err != nil { return fmt.Errorf("failed to write subnetTx: %w", err) } + delete(ms.addedElasticSubnets, subnetID) } - ms.addedElasticSubnets = nil return nil } From 76d09a31a46d996c5996538439025f50c74db354 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sun, 23 Jul 2023 17:02:40 +0200 Subject: [PATCH 070/132] nits --- vms/platformvm/state/merkle_state.go | 5 +++++ vms/platformvm/txs/executor/staker_tx_verification.go | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index f6bc4df54ba5..a789a8a39fdd 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1205,6 +1205,11 @@ func (ms *merkleState) writeElasticSubnets(view merkledb.TrieView, ctx context.C return fmt.Errorf("failed to write subnetTx: %w", err) } delete(ms.addedElasticSubnets, subnetID) + + // Note: Evict is used rather than Put here because tx may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. 
+ ms.elasticSubnetCache.Evict(subnetID) } return nil } diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 304c2d73edf8..85bc8e9103e0 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -468,7 +468,7 @@ func verifyAddPermissionlessValidatorTx( validatorRules, err := getValidatorRules(backend, chainState, tx.Subnet) if err != nil { - return err + return fmt.Errorf("failed retrieving validator rules: %w", err) } duration := tx.Validator.Duration() From 0edc2f49c92235f80558e242b3222f000d72421f Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sun, 23 Jul 2023 17:38:39 +0200 Subject: [PATCH 071/132] wip: fixed ginkgo tests --- vms/platformvm/state/diff.go | 7 ++----- vms/platformvm/state/merkle_state.go | 11 +++++------ 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 568515b02769..6ce07bd57964 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -309,12 +309,9 @@ func (d *diff) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { func (d *diff) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { transformSubnetTx := transformSubnetTxIntf.Unsigned.(*txs.TransformSubnetTx) if d.transformedSubnets == nil { - d.transformedSubnets = map[ids.ID]*txs.Tx{ - transformSubnetTx.Subnet: transformSubnetTxIntf, - } - } else { - d.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf + d.transformedSubnets = make(map[ids.ID]*txs.Tx) } + d.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf } func (d *diff) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index a789a8a39fdd..77613cb7cf12 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -632,11 +632,10 @@ func (ms *merkleState) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) } key := merkleElasticSubnetKey(subnetID) - - transformSubnetTxID, err := database.GetID(ms.merkleDB, key) + transformSubnetTxBytes, err := ms.merkleDB.Get(key) switch err { case nil: - transformSubnetTx, _, err := ms.GetTx(transformSubnetTxID) + transformSubnetTx, err := txs.Parse(txs.GenesisCodec, transformSubnetTxBytes) if err != nil { return nil, err } @@ -1198,9 +1198,9 @@ func (ms *merkleState) writePermissionedSubnets(view merkledb.TrieView, ctx cont } func (ms *merkleState) writeElasticSubnets(view merkledb.TrieView, ctx context.Context) error { - for subnetID, subnetTx := range ms.addedElasticSubnets { - key := merkleElasticSubnetKey(subnetID) - if err := view.Insert(ctx, key, subnetTx.Bytes()); err != nil { + for subnetID, transformSubnetTx := range ms.addedElasticSubnets { + key := merkleElasticSubnetKey(subnetID) + if err := view.Insert(ctx, key, transformSubnetTx.Bytes()); err != nil { return fmt.Errorf("failed to write subnetTx: %w", err) } delete(ms.addedElasticSubnets, subnetID) From b7e84b9c1f7633b8b59508e8256c2cf91eed468d Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Mon, 24 Jul 2023 09:04:56 +0200 Subject: [PATCH 072/132] completed UTs rotation from state.state to state.merklestate --- vms/platformvm/state/merkle_state.go | 51 ++++++--- vms/platformvm/state/state_test.go | 153 +++++++++++++++++++-------- 2 files changed, 147 insertions(+), 57 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go
b/vms/platformvm/state/merkle_state.go index 77613cb7cf12..9b2ad8762419 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -81,6 +81,39 @@ func NewMerkleState( rewards reward.Calculator, bootstrapped *utils.Atomic[bool], ) (State, error) { + res, err := newMerklsState( + rawDB, + metrics, + cfg, + execCfg, + ctx, + metricsReg, + rewards, + bootstrapped, + ) + if err != nil { + return nil, err + } + + if err := res.sync(genesisBytes); err != nil { + // Drop any errors on close to return the first error + _ = res.Close() + return nil, err + } + + return res, nil +} + +func newMerklsState( + rawDB database.Database, + metrics metrics.Metrics, + cfg *config.Config, + execCfg *config.ExecutionConfig, + ctx *snow.Context, + metricsReg prometheus.Registerer, + rewards reward.Calculator, + bootstrapped *utils.Atomic[bool], +) (*merkleState, error) { var ( baseDB = versiondb.New(rawDB) baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) @@ -181,7 +214,7 @@ func NewMerkleState( return nil, err } - res := &merkleState{ + return &merkleState{ cfg: cfg, ctx: ctx, metrics: metrics, @@ -233,15 +266,7 @@ func NewMerkleState( validatorBlsKeyDiffsCache: validatorBlsKeyDiffsCache, localBlsKeyDiffDB: localBlsKeyDiffDB, - } - - if err := res.sync(genesisBytes); err != nil { - // Drop any errors on close to return the first error - _ = res.Close() - return nil, err - } - - return res, nil + }, nil } type merkleState struct { @@ -1459,9 +1484,11 @@ func (ms *merkleState) writeWeightDiffs(height uint64, weightDiffs map[weightDif Height: height, SubnetID: weightKey.subnetID, } - cacheValue := map[ids.NodeID]*ValidatorWeightDiff{ - weightKey.nodeID: weightDiff, + cacheValue, found := ms.validatorWeightDiffsCache.Get(cacheKey) + if !found { + cacheValue = make(map[ids.NodeID]*ValidatorWeightDiff) } + cacheValue[weightKey.nodeID] = weightDiff ms.validatorWeightDiffsCache.Put(cacheKey, cacheValue) } return nil diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 697c013fa79d..94c3b0cb2909 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -4,6 +4,7 @@ package state import ( + "strconv" "testing" "time" @@ -30,6 +31,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -45,16 +47,16 @@ func TestStateInitialization(t *testing.T) { require := require.New(t) s, db := newUninitializedState(require) - shouldInit, err := s.(*state).shouldInit() + shouldInit, err := s.(*merkleState).shouldInit() require.NoError(err) require.True(shouldInit) - require.NoError(s.(*state).doneInit()) + require.NoError(s.(*merkleState).doneInit()) require.NoError(s.Commit()) s = newStateFromDB(require, db) - shouldInit, err = s.(*state).shouldInit() + shouldInit, err = s.(*merkleState).shouldInit() require.NoError(err) require.False(shouldInit) } @@ -87,20 +89,36 @@ func TestStateSyncGenesis(t *testing.T) { func TestGetValidatorWeightDiffs(t *testing.T) { require := require.New(t) stateIntf, _ := newInitializedState(require) - state := stateIntf.(*state) + state := stateIntf.(*merkleState) - txID0 := ids.GenerateTestID() - txID1 := ids.GenerateTestID() - txID2 := ids.GenerateTestID() - txID3 := ids.GenerateTestID() + tx0 := 
&txs.Tx{ + Unsigned: &txs.AddValidatorTx{}, // don't really care to fill it up + } + tx0.SetBytes([]byte{0x1}, []byte(strconv.Itoa(0))) - nodeID0 := ids.GenerateTestNodeID() + tx1 := &txs.Tx{ + Unsigned: &txs.AddDelegatorTx{}, // don't really care to fill it up + } + tx1.SetBytes([]byte{0x1}, []byte(strconv.Itoa(1))) + + tx2 := &txs.Tx{ + Unsigned: &txs.AddDelegatorTx{}, // don't really care to fill it up + } + tx2.SetBytes([]byte{0x1}, []byte(strconv.Itoa(2))) + tx3 := &txs.Tx{ + Unsigned: &txs.AddValidatorTx{}, // don't really care to fill it up + } + tx3.SetBytes([]byte{0x1}, []byte(strconv.Itoa(3))) + + nodeID0 := ids.GenerateTestNodeID() subnetID0 := ids.GenerateTestID() type stakerDiff struct { validatorsToAdd []*Staker + valTxs []*txs.Tx delegatorsToAdd []*Staker + delTxs []*txs.Tx validatorsToRemove []*Staker delegatorsToRemove []*Staker @@ -110,12 +128,13 @@ func TestGetValidatorWeightDiffs(t *testing.T) { { validatorsToAdd: []*Staker{ { - TxID: txID0, + TxID: tx0.ID(), NodeID: nodeID0, SubnetID: constants.PrimaryNetworkID, Weight: 1, }, }, + valTxs: []*txs.Tx{tx0}, expectedValidatorWeightDiffs: map[ids.ID]map[ids.NodeID]*ValidatorWeightDiff{ constants.PrimaryNetworkID: { nodeID0: { @@ -128,20 +147,22 @@ func TestGetValidatorWeightDiffs(t *testing.T) { { validatorsToAdd: []*Staker{ { - TxID: txID3, + TxID: tx3.ID(), NodeID: nodeID0, SubnetID: subnetID0, Weight: 10, }, }, + valTxs: []*txs.Tx{tx3}, delegatorsToAdd: []*Staker{ { - TxID: txID1, + TxID: tx1.ID(), NodeID: nodeID0, SubnetID: constants.PrimaryNetworkID, Weight: 5, }, }, + delTxs: []*txs.Tx{tx1}, expectedValidatorWeightDiffs: map[ids.ID]map[ids.NodeID]*ValidatorWeightDiff{ constants.PrimaryNetworkID: { nodeID0: { @@ -160,15 +181,16 @@ func TestGetValidatorWeightDiffs(t *testing.T) { { delegatorsToAdd: []*Staker{ { - TxID: txID2, + TxID: tx2.ID(), NodeID: nodeID0, SubnetID: constants.PrimaryNetworkID, Weight: 15, }, }, + delTxs: []*txs.Tx{tx2}, delegatorsToRemove: []*Staker{ { - TxID: txID1, + TxID: tx1.ID(), NodeID: nodeID0, SubnetID: constants.PrimaryNetworkID, Weight: 5, @@ -186,26 +208,28 @@ func TestGetValidatorWeightDiffs(t *testing.T) { { validatorsToRemove: []*Staker{ { - TxID: txID0, + TxID: tx0.ID(), NodeID: nodeID0, SubnetID: constants.PrimaryNetworkID, Weight: 1, }, { - TxID: txID3, + TxID: tx3.ID(), NodeID: nodeID0, SubnetID: subnetID0, Weight: 10, }, }, + valTxs: []*txs.Tx{tx0, tx3}, delegatorsToRemove: []*Staker{ { - TxID: txID2, + TxID: tx2.ID(), NodeID: nodeID0, SubnetID: constants.PrimaryNetworkID, Weight: 15, }, }, + delTxs: []*txs.Tx{tx2}, expectedValidatorWeightDiffs: map[ids.ID]map[ids.NodeID]*ValidatorWeightDiff{ constants.PrimaryNetworkID: { nodeID0: { @@ -225,6 +249,12 @@ func TestGetValidatorWeightDiffs(t *testing.T) { } for i, stakerDiff := range stakerDiffs { + for _, valTx := range stakerDiff.valTxs { + state.AddTx(valTx, status.Committed) + } + for _, delTx := range stakerDiff.delTxs { + state.AddTx(delTx, status.Committed) + } for _, validator := range stakerDiff.validatorsToAdd { state.PutCurrentValidator(validator) } @@ -259,11 +289,11 @@ func TestGetValidatorWeightDiffs(t *testing.T) { func TestGetValidatorPublicKeyDiffs(t *testing.T) { require := require.New(t) stateIntf, _ := newInitializedState(require) - state := stateIntf.(*state) + state := stateIntf.(*merkleState) var ( numNodes = 6 - txIDs = make([]ids.ID, numNodes) + valTxs = make([]*txs.Tx, numNodes) nodeIDs = make([]ids.NodeID, numNodes) sks = make([]*bls.SecretKey, numNodes) pks = make([]*bls.PublicKey, numNodes) @@ -271,7 
+301,10 @@ func TestGetValidatorPublicKeyDiffs(t *testing.T) { err error ) for i := 0; i < numNodes; i++ { - txIDs[i] = ids.GenerateTestID() + valTxs[i] = &txs.Tx{ + Unsigned: &txs.AddValidatorTx{}, // don't really care to fill it up + } + valTxs[i].SetBytes([]byte{0x1}, []byte(strconv.Itoa(i))) nodeIDs[i] = ids.GenerateTestNodeID() sks[i], err = bls.NewSecretKey() require.NoError(err) @@ -281,6 +314,7 @@ func TestGetValidatorPublicKeyDiffs(t *testing.T) { type stakerDiff struct { validatorsToAdd []*Staker + validatorsTxsToAdd []*txs.Tx validatorsToRemove []*Staker expectedPublicKeyDiffs map[ids.NodeID]*bls.PublicKey } @@ -289,25 +323,26 @@ func TestGetValidatorPublicKeyDiffs(t *testing.T) { // Add two validators validatorsToAdd: []*Staker{ { - TxID: txIDs[0], + TxID: valTxs[0].ID(), NodeID: nodeIDs[0], Weight: 1, PublicKey: pks[0], }, { - TxID: txIDs[1], + TxID: valTxs[1].ID(), NodeID: nodeIDs[1], Weight: 10, PublicKey: pks[1], }, }, + validatorsTxsToAdd: []*txs.Tx{valTxs[0], valTxs[1]}, expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{}, }, { // Remove a validator validatorsToRemove: []*Staker{ { - TxID: txIDs[0], + TxID: valTxs[0].ID(), NodeID: nodeIDs[0], Weight: 1, PublicKey: pks[0], @@ -321,21 +356,22 @@ func TestGetValidatorPublicKeyDiffs(t *testing.T) { // Add 2 validators and remove a validator validatorsToAdd: []*Staker{ { - TxID: txIDs[2], + TxID: valTxs[2].ID(), NodeID: nodeIDs[2], Weight: 10, PublicKey: pks[2], }, { - TxID: txIDs[3], + TxID: valTxs[3].ID(), NodeID: nodeIDs[3], Weight: 10, PublicKey: pks[3], }, }, + validatorsTxsToAdd: []*txs.Tx{valTxs[2], valTxs[3]}, validatorsToRemove: []*Staker{ { - TxID: txIDs[1], + TxID: valTxs[1].ID(), NodeID: nodeIDs[1], Weight: 10, PublicKey: pks[1], @@ -349,21 +385,22 @@ func TestGetValidatorPublicKeyDiffs(t *testing.T) { // Remove 2 validators and add a validator validatorsToAdd: []*Staker{ { - TxID: txIDs[4], + TxID: valTxs[4].ID(), NodeID: nodeIDs[4], Weight: 10, PublicKey: pks[4], }, }, + validatorsTxsToAdd: []*txs.Tx{valTxs[4]}, validatorsToRemove: []*Staker{ { - TxID: txIDs[2], + TxID: valTxs[2].ID(), NodeID: nodeIDs[2], Weight: 10, PublicKey: pks[2], }, { - TxID: txIDs[3], + TxID: valTxs[3].ID(), NodeID: nodeIDs[3], Weight: 10, PublicKey: pks[3], @@ -378,19 +415,20 @@ func TestGetValidatorPublicKeyDiffs(t *testing.T) { // Add a validator with no pub key validatorsToAdd: []*Staker{ { - TxID: txIDs[5], + TxID: valTxs[5].ID(), NodeID: nodeIDs[5], Weight: 10, PublicKey: nil, }, }, + validatorsTxsToAdd: []*txs.Tx{valTxs[5]}, expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{}, }, { // Remove a validator with no pub key validatorsToRemove: []*Staker{ { - TxID: txIDs[5], + TxID: valTxs[5].ID(), NodeID: nodeIDs[5], Weight: 10, PublicKey: nil, @@ -401,6 +439,9 @@ func TestGetValidatorPublicKeyDiffs(t *testing.T) { } for i, stakerDiff := range stakerDiffs { + for _, tx := range stakerDiff.validatorsTxsToAdd { + state.AddTx(tx, status.Committed) + } for _, validator := range stakerDiff.validatorsToAdd { state.PutCurrentValidator(validator) } @@ -418,7 +459,7 @@ func TestGetValidatorPublicKeyDiffs(t *testing.T) { pkDiffs, err := state.GetValidatorPublicKeyDiffs(uint64(j + 1)) require.NoError(err) require.Equal(stakerDiff.expectedPublicKeyDiffs, pkDiffs) - state.validatorPublicKeyDiffsCache.Flush() + state.validatorBlsKeyDiffsCache.Flush() } } } @@ -482,7 +523,7 @@ func newInitializedState(require *require.Assertions) (State, database.Database) genesisBlk, err := blocks.NewApricotCommitBlock(genesisBlkID, 0) require.NoError(err) - 
require.NoError(s.(*state).syncGenesis(genesisBlk, genesisState)) + require.NoError(s.(*merkleState).syncGenesis(genesisBlk, genesisState)) return s, db } @@ -497,7 +538,7 @@ func newStateFromDB(require *require.Assertions, db database.Database) State { primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) execCfg, _ := config.GetExecutionConfig(nil) - state, err := new( + state, err := newMerklsState( db, metrics.Noop, &config.Config{ @@ -672,14 +713,28 @@ func TestStateAddRemoveValidator(t *testing.T) { startTime = time.Now() endTime = startTime.Add(24 * time.Hour) stakers = make([]Staker, numNodes) + addedTxs = make([]*txs.Tx, numNodes) ) for i := 0; i < numNodes; i++ { + valTx := txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(startTime.Add(time.Duration(i) * time.Second).Unix()), + End: uint64(endTime.Add(time.Duration(i) * time.Second).Unix()), + Wght: uint64(i + 1), + } + addedTxs[i] = &txs.Tx{ + Unsigned: &txs.AddValidatorTx{ + Validator: valTx, + }, + } + addedTxs[i].SetBytes([]byte{0x1}, []byte(strconv.Itoa(10*i))) + stakers[i] = Staker{ - TxID: ids.GenerateTestID(), - NodeID: ids.GenerateTestNodeID(), - Weight: uint64(i + 1), - StartTime: startTime.Add(time.Duration(i) * time.Second), - EndTime: endTime.Add(time.Duration(i) * time.Second), + TxID: addedTxs[i].ID(), + NodeID: valTx.NodeID, + Weight: valTx.Wght, + StartTime: valTx.StartTime(), + EndTime: valTx.EndTime(), PotentialReward: uint64(i + 1), } if i%2 == 0 { @@ -693,7 +748,8 @@ func TestStateAddRemoveValidator(t *testing.T) { } type diff struct { - added []Staker + addedStakers []Staker + addedTxs []*txs.Tx removed []Staker expectedSubnetWeightDiff map[ids.NodeID]*ValidatorWeightDiff expectedPrimaryNetworkWeightDiff map[ids.NodeID]*ValidatorWeightDiff @@ -702,7 +758,8 @@ func TestStateAddRemoveValidator(t *testing.T) { diffs := []diff{ { // Add a subnet validator - added: []Staker{stakers[0]}, + addedStakers: []Staker{stakers[0]}, + addedTxs: []*txs.Tx{addedTxs[0]}, expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ stakers[0].NodeID: { @@ -727,7 +784,8 @@ func TestStateAddRemoveValidator(t *testing.T) { expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, }, { // Add a primary network validator - added: []Staker{stakers[1]}, + addedStakers: []Staker{stakers[1]}, + addedTxs: []*txs.Tx{addedTxs[1]}, expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ stakers[1].NodeID: { Decrease: false, @@ -752,7 +810,8 @@ func TestStateAddRemoveValidator(t *testing.T) { }, { // Add 2 subnet validators and a primary network validator - added: []Staker{stakers[0], stakers[1], stakers[2]}, + addedStakers: []Staker{stakers[0], stakers[1], stakers[2]}, + addedTxs: []*txs.Tx{addedTxs[0], addedTxs[1], addedTxs[2]}, expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ stakers[1].NodeID: { Decrease: false, @@ -797,7 +856,11 @@ func TestStateAddRemoveValidator(t *testing.T) { } for i, diff := range diffs { - for _, added := range diff.added { + for _, tx := range diff.addedTxs { + state.AddTx(tx, status.Committed) + } + + for _, added := range diff.addedStakers { added := added state.PutCurrentValidator(&added) } @@ -811,7 +874,7 @@ func TestStateAddRemoveValidator(t *testing.T) { require.NoError(state.Commit()) - for _, added := range diff.added { + for _, added := range diff.addedStakers { gotValidator, err := state.GetCurrentValidator(added.SubnetID, added.NodeID) 
require.NoError(err) require.Equal(added, *gotValidator) From 87abc102a389dcd96392a4df23fbf39b00482442 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 16 Aug 2023 08:32:16 +0200 Subject: [PATCH 073/132] dropped unreachable code --- vms/platformvm/state/stakers.go | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index bb202a640bb6..2113979cb42f 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -148,13 +148,8 @@ func (v *baseStakers) DeleteValidator(staker *Staker) { v.pruneValidator(staker.SubnetID, staker.NodeID) validatorDiff := v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) - if validatorDiff.validatorStatus == added { - validatorDiff.validatorStatus = unmodified - validatorDiff.validator = nil - } else { - validatorDiff.validatorStatus = deleted - validatorDiff.validator = staker - } + validatorDiff.validatorStatus = deleted + validatorDiff.validator = staker v.stakers.Delete(staker) } @@ -195,19 +190,10 @@ func (v *baseStakers) DeleteDelegator(staker *Staker) { v.pruneValidator(staker.SubnetID, staker.NodeID) validatorDiff := v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) - found := false - if validatorDiff.addedDelegators != nil { - if _, found = validatorDiff.addedDelegators.Get(staker); found { - // delegator to be removed was just added. Wipe it up here - validatorDiff.addedDelegators.Delete(staker) - } - } - if !found { - if validatorDiff.deletedDelegators == nil { - validatorDiff.deletedDelegators = make(map[ids.ID]*Staker) - } - validatorDiff.deletedDelegators[staker.TxID] = staker + if validatorDiff.deletedDelegators == nil { + validatorDiff.deletedDelegators = make(map[ids.ID]*Staker) } + validatorDiff.deletedDelegators[staker.TxID] = staker v.stakers.Delete(staker) } From 12cc86270891381f47b7ee2afabc22ebd50a6afa Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 25 Aug 2023 11:49:03 +0200 Subject: [PATCH 074/132] leftover from merge --- vms/platformvm/state/stakers.go | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index bb202a640bb6..2113979cb42f 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -148,13 +148,8 @@ func (v *baseStakers) DeleteValidator(staker *Staker) { v.pruneValidator(staker.SubnetID, staker.NodeID) validatorDiff := v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) - if validatorDiff.validatorStatus == added { - validatorDiff.validatorStatus = unmodified - validatorDiff.validator = nil - } else { - validatorDiff.validatorStatus = deleted - validatorDiff.validator = staker - } + validatorDiff.validatorStatus = deleted + validatorDiff.validator = staker v.stakers.Delete(staker) } @@ -195,19 +190,10 @@ func (v *baseStakers) DeleteDelegator(staker *Staker) { v.pruneValidator(staker.SubnetID, staker.NodeID) validatorDiff := v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) - found := false - if validatorDiff.addedDelegators != nil { - if _, found = validatorDiff.addedDelegators.Get(staker); found { - // delegator to be removed was just added. 
Wipe it up here - validatorDiff.addedDelegators.Delete(staker) - } - } - if !found { - if validatorDiff.deletedDelegators == nil { - validatorDiff.deletedDelegators = make(map[ids.ID]*Staker) - } - validatorDiff.deletedDelegators[staker.TxID] = staker + if validatorDiff.deletedDelegators == nil { + validatorDiff.deletedDelegators = make(map[ids.ID]*Staker) } + validatorDiff.deletedDelegators[staker.TxID] = staker v.stakers.Delete(staker) } From 3616ab745be0b160590b766fe8e703186ca59af4 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 25 Aug 2023 12:22:53 +0200 Subject: [PATCH 075/132] nits --- vms/platformvm/blocks/builder/helpers_test.go | 4 ++-- vms/platformvm/blocks/executor/helpers_test.go | 4 ++-- vms/platformvm/state/merkle_state.go | 4 ++-- vms/platformvm/state/stakers_helpers_test.go | 4 ++-- vms/platformvm/txs/executor/helpers_test.go | 4 ++-- vms/platformvm/vm.go | 4 ++-- vms/platformvm/vm_regression_test.go | 8 ++++---- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/vms/platformvm/blocks/builder/helpers_test.go b/vms/platformvm/blocks/builder/helpers_test.go index b66e74dd9cb9..46c22e04a4ff 100644 --- a/vms/platformvm/blocks/builder/helpers_test.go +++ b/vms/platformvm/blocks/builder/helpers_test.go @@ -232,12 +232,12 @@ func defaultState( genesisBytes := buildGenesisTest(t, ctx) state, err := state.NewMerkleState( db, - metrics.Noop, genesisBytes, + prometheus.NewRegistry(), cfg, execCfg, ctx, - prometheus.NewRegistry(), + metrics.Noop, rewards, &utils.Atomic[bool]{}, ) diff --git a/vms/platformvm/blocks/executor/helpers_test.go b/vms/platformvm/blocks/executor/helpers_test.go index 3164c15b3362..31d834edf34b 100644 --- a/vms/platformvm/blocks/executor/helpers_test.go +++ b/vms/platformvm/blocks/executor/helpers_test.go @@ -271,12 +271,12 @@ func defaultState( execCfg, _ := config.GetExecutionConfig([]byte(`{}`)) state, err := state.NewMerkleState( db, - metrics.Noop, genesisBytes, + prometheus.NewRegistry(), cfg, execCfg, ctx, - prometheus.NewRegistry(), + metrics.Noop, rewards, &utils.Atomic[bool]{}, ) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index fa67de3ad5be..d1f99fbc50a4 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -75,12 +75,12 @@ var ( func NewMerkleState( rawDB database.Database, - metrics metrics.Metrics, genesisBytes []byte, + metricsReg prometheus.Registerer, cfg *config.Config, execCfg *config.ExecutionConfig, ctx *snow.Context, - metricsReg prometheus.Registerer, + metrics metrics.Metrics, rewards reward.Calculator, bootstrapped *utils.Atomic[bool], ) (State, error) { diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index d226cc18119d..c2a2b001d921 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -80,12 +80,12 @@ func buildChainState(baseDB database.Database, trackedSubnets []ids.ID) (State, rewardsCalc := reward.NewCalculator(cfg.RewardConfig) return NewMerkleState( baseDB, - metrics.Noop, genesisBytes, + prometheus.NewRegistry(), cfg, execConfig, ctx, - prometheus.NewRegistry(), + metrics.Noop, rewardsCalc, &utils.Atomic[bool]{}, ) diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index 900210ee5cbf..9a1cfbd8921c 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -223,12 +223,12 @@ func defaultState( execCfg, _ 
:= config.GetExecutionConfig(nil) state, err := state.NewMerkleState( db, - metrics.Noop, genesisBytes, + prometheus.NewRegistry(), cfg, execCfg, ctx, - prometheus.NewRegistry(), + metrics.Noop, rewards, &utils.Atomic[bool]{}, ) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 608c4d252629..a4f5407465ba 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -139,12 +139,12 @@ func (vm *VM) Initialize( vm.state, err = state.NewMerkleState( vm.dbManager.Current().Database, - vm.metrics, genesisBytes, + registerer, &vm.Config, execConfig, vm.ctx, - registerer, + vm.metrics, rewards, &vm.bootstrapped, ) diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index f9f698db6a1e..fc1e8b91a663 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -657,12 +657,12 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { execCfg, _ := config.GetExecutionConfig(nil) newState, err := state.NewMerkleState( vm.dbManager.Current().Database, - metrics.Noop, nil, + prometheus.NewRegistry(), &vm.Config, execCfg, vm.ctx, - prometheus.NewRegistry(), + metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), &utils.Atomic[bool]{}, ) @@ -967,12 +967,12 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { execCfg, _ := config.GetExecutionConfig(nil) newState, err := state.NewMerkleState( vm.dbManager.Current().Database, - metrics.Noop, nil, + prometheus.NewRegistry(), &vm.Config, execCfg, vm.ctx, - prometheus.NewRegistry(), + metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), &utils.Atomic[bool]{}, ) From c9db860e63672a9d45cf50911e197390069dcb1b Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 25 Aug 2023 14:01:00 +0200 Subject: [PATCH 076/132] fixed metrics --- vms/platformvm/state/merkle_state.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index d1f99fbc50a4..8eb72dfbf6fa 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1642,5 +1642,12 @@ func (ms *merkleState) updateValidatorSet( return fmt.Errorf("failed to update validator weight: %w", err) } } + + primaryValidators, ok := ms.cfg.Validators.Get(constants.PrimaryNetworkID) + if !ok { + return nil + } + ms.metrics.SetLocalStake(primaryValidators.GetWeight(ms.ctx.NodeID)) + ms.metrics.SetTotalStake(primaryValidators.Weight()) return nil } From aec5b59a5e747ad384386d8375cea3bed27e92f9 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 25 Aug 2023 14:36:04 +0200 Subject: [PATCH 077/132] added merkleDB root logging --- vms/platformvm/state/merkle_state.go | 30 ++++++++++++++++++- vms/platformvm/state/merkle_state_load_ops.go | 2 ++ vms/platformvm/state/state_test.go | 5 +++- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 8eb72dfbf6fa..72f854e700eb 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -11,6 +11,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" @@ -1244,6 +1245,8 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta ms.writeDelegateeRewards(&batchOps), ms.writeUTXOs(&batchOps), ms.writeRewardUTXOs(&batchOps), + + ms.logMerkleRoot(), ) if errs.Err != 
nil { return errs.Err @@ -1252,7 +1255,7 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta ctx := context.TODO() view, err := ms.merkleDB.NewView(ctx, batchOps) if err != nil { - return err + return fmt.Errorf("failed creating merkleDB view: %w", err) } return view.CommitToDB(ctx) @@ -1651,3 +1654,28 @@ func (ms *merkleState) updateValidatorSet( ms.metrics.SetTotalStake(primaryValidators.Weight()) return nil } + +func (ms *merkleState) logMerkleRoot() error { + ctx := context.TODO() + view, err := ms.merkleDB.NewView(ctx, nil) + if err != nil { + return fmt.Errorf("failed creating merkleDB view: %w", err) + } + root, err := view.GetMerkleRoot(ctx) + if err != nil { + return fmt.Errorf("failed pulling merkle root: %w", err) + } + + // get current Height + blk, err := ms.GetStatelessBlock(ms.GetLastAccepted()) + if err != nil { + // may happen in tests. Let's just skip + return nil + } + + ms.ctx.Log.Info("merkle root", + zap.Uint64("height", blk.Height()), + zap.String("merkle root", root.String()), + ) + return nil +} diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go index 4983c3d0309b..5e5867edaf8b 100644 --- a/vms/platformvm/state/merkle_state_load_ops.go +++ b/vms/platformvm/state/merkle_state_load_ops.go @@ -158,6 +158,8 @@ func (ms *merkleState) load() error { ms.loadCurrentStakers(), ms.loadPendingStakers(), ms.initValidatorSets(), + + ms.logMerkleRoot(), ) return errs.Err } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index d33f7a66a4ec..7f0af2c18287 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -24,6 +24,7 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -170,7 +171,9 @@ func newStateFromDB(require *require.Assertions, db database.Database) State { Validators: vdrs, }, execCfg, - &snow.Context{}, + &snow.Context{ + Log: logging.NoLog{}, + }, prometheus.NewRegistry(), reward.NewCalculator(reward.Config{ MaxConsumptionRate: .12 * reward.PercentDenominator, From 920ab8cf90c75c8c550a4965de5d112644af7f9b Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Sat, 26 Aug 2023 17:22:08 +0200 Subject: [PATCH 078/132] nit --- vms/platformvm/state/merkle_state.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 72f854e700eb..0493ff550eb7 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1245,8 +1245,6 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta ms.writeDelegateeRewards(&batchOps), ms.writeUTXOs(&batchOps), ms.writeRewardUTXOs(&batchOps), - - ms.logMerkleRoot(), ) if errs.Err != nil { return errs.Err @@ -1257,8 +1255,11 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta if err != nil { return fmt.Errorf("failed creating merkleDB view: %w", err) } + if err := view.CommitToDB(ctx); err != nil { + return fmt.Errorf("failed committing merkleDB view: %w", err) + } - return view.CommitToDB(ctx) + return ms.logMerkleRoot() } func (ms *merkleState) writeMetadata(batchOps *[]database.BatchOp) error { From 
ccffef8b34c6f4c1b13552ed6e4831b167d78720 Mon Sep 17 00:00:00 2001
From: Alberto Benegiamo
Date: Wed, 30 Aug 2023 12:58:20 +0200
Subject: [PATCH 079/132] nits to improve merkleDB commit logging

---
 vms/platformvm/state/merkle_state.go          | 51 ++++++++++++-------
 vms/platformvm/state/merkle_state_load_ops.go |  6 +--
 2 files changed, 37 insertions(+), 20 deletions(-)

diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go
index 0493ff550eb7..c7f2581697a2 100644
--- a/vms/platformvm/state/merkle_state.go
+++ b/vms/platformvm/state/merkle_state.go
@@ -311,7 +311,8 @@ type merkleState struct {
 	chainCache cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx

 	// Blocks section
-	addedBlocks map[ids.ID]blocks.Block // map of blockID -> Block
+	// Note: addedBlocks may hold multiple entries at a time, because more than one block can be committed at once (a proposal block plus its accepted option)
+	addedBlocks map[ids.ID]blocks.Block // map of blockID -> Block.
 	blockCache  cache.Cacher[ids.ID, blocks.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database
 	blockDB     database.Database

@@ -1186,6 +1187,8 @@ func (ms *merkleState) processPendingStakers() (map[ids.ID]*stakersData, error)
 	for subnetID, subnetValidatorDiffs := range ms.pendingStakers.validatorDiffs {
 		delete(ms.pendingStakers.validatorDiffs, subnetID)
 		for _, validatorDiff := range subnetValidatorDiffs {
+			// validatorDiff.validator is not guaranteed to be non-nil here.
+			// Access it only if validatorDiff.validatorStatus is added or deleted
 			switch validatorDiff.validatorStatus {
 			case added:
 				txID := validatorDiff.validator.TxID
@@ -1250,16 +1253,20 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta
 		return errs.Err
 	}

-	ctx := context.TODO()
-	view, err := ms.merkleDB.NewView(ctx, batchOps)
-	if err != nil {
-		return fmt.Errorf("failed creating merkleDB view: %w", err)
-	}
-	if err := view.CommitToDB(ctx); err != nil {
-		return fmt.Errorf("failed committing merkleDB view: %w", err)
+	if len(batchOps) != 0 {
+		// do commit only if there are changes to merkle state
+		ctx := context.TODO()
+		view, err := ms.merkleDB.NewView(ctx, batchOps)
+		if err != nil {
+			return fmt.Errorf("failed creating merkleDB view: %w", err)
+		}
+		if err := view.CommitToDB(ctx); err != nil {
+			return fmt.Errorf("failed committing merkleDB view: %w", err)
+		}
 	}

-	return ms.logMerkleRoot()
+	// log whether we had changes or not
+	return ms.logMerkleRoot(len(batchOps) != 0)
 }

 func (ms *merkleState) writeMetadata(batchOps *[]database.BatchOp) error {
@@ -1656,7 +1663,23 @@ func (ms *merkleState) updateValidatorSet(
 	return nil
 }

-func (ms *merkleState) logMerkleRoot() error {
+func (ms *merkleState) logMerkleRoot(hasChanges bool) error {
+	// get current Height
+	blk, err := ms.GetStatelessBlock(ms.GetLastAccepted())
+	if err != nil {
+		// may happen in tests. Let's just skip
+		return nil
+	}
+
+	if !hasChanges {
+		ms.ctx.Log.Info("merkle root",
+			zap.Uint64("height", blk.Height()),
+			zap.Stringer("blkID", blk.ID()),
+			zap.String("merkle root", "no changes to merkle state"),
+		)
+		return nil
+	}
+
 	ctx := context.TODO()
 	view, err := ms.merkleDB.NewView(ctx, nil)
 	if err != nil {
@@ -1667,15 +1690,9 @@ func (ms *merkleState) logMerkleRoot() error {
 		return fmt.Errorf("failed pulling merkle root: %w", err)
 	}

-	// get current Height
-	blk, err := ms.GetStatelessBlock(ms.GetLastAccepted())
-	if err != nil {
-		// may happen in tests.
Let's just skip - return nil - } - ms.ctx.Log.Info("merkle root", zap.Uint64("height", blk.Height()), + zap.Stringer("blkID", blk.ID()), zap.String("merkle root", root.String()), ) return nil diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go index 5e5867edaf8b..cbf24e175d0e 100644 --- a/vms/platformvm/state/merkle_state_load_ops.go +++ b/vms/platformvm/state/merkle_state_load_ops.go @@ -45,7 +45,7 @@ func (ms *merkleState) sync(genesis []byte) error { } } - return ms.load() + return ms.load(shouldInit) } func (ms *merkleState) shouldInit() (bool, error) { @@ -151,7 +151,7 @@ func (ms *merkleState) syncGenesis(genesisBlk blocks.Block, genesis *genesis.Sta } // Load pulls data previously stored on disk that is expected to be in memory. -func (ms *merkleState) load() error { +func (ms *merkleState) load(hasSynced bool) error { errs := wrappers.Errs{} errs.Add( ms.loadMerkleMetadata(), @@ -159,7 +159,7 @@ func (ms *merkleState) load() error { ms.loadPendingStakers(), ms.initValidatorSets(), - ms.logMerkleRoot(), + ms.logMerkleRoot(!hasSynced), // we already logged if sync has happened ) return errs.Err } From 415b34d8dbce6df44ce802ca13fbbf7870c75498 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 30 Aug 2023 13:09:09 +0200 Subject: [PATCH 080/132] avoid genesis double commit --- vms/platformvm/state/merkle_state.go | 52 +++++++++++-------- vms/platformvm/state/merkle_state_load_ops.go | 20 +++---- 2 files changed, 35 insertions(+), 37 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index c7f2581697a2..512324bbf66e 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -232,8 +232,8 @@ func newMerklsState( addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), rewardUTXOsCache: rewardUTXOsCache, - supplies: make(map[ids.ID]uint64), - suppliesCache: suppliesCache, + modifiedSupplies: make(map[ids.ID]uint64), + suppliesCache: suppliesCache, addedPermissionedSubnets: make([]*txs.Tx, 0), permissionedSubnetCache: nil, // created first time GetSubnets is called @@ -294,11 +294,11 @@ type merkleState struct { rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO // Metadata section - chainTime time.Time - lastAcceptedBlkID ids.ID - lastAcceptedHeight uint64 // TODO: Should this be written to state?? - supplies map[ids.ID]uint64 // map of subnetID -> current supply - suppliesCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database + chainTime, latestComittedChainTime time.Time + lastAcceptedBlkID, latestCommittedLastAcceptedBlkID ids.ID + lastAcceptedHeight uint64 // TODO: Should this be written to state?? 
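+	// modifiedSupplies only tracks supplies changed since the last commit:
+	// writeMetadata drains it entry by entry, so an empty map means there is
+	// nothing left to persist for supplies.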
+	modifiedSupplies map[ids.ID]uint64             // map of subnetID -> current supply
+	suppliesCache    cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database

 	// Subnets section
 	addedPermissionedSubnets []*txs.Tx // added SubnetTxs, waiting to be committed
@@ -575,7 +575,7 @@ func (ms *merkleState) SetHeight(height uint64) {
 }

 func (ms *merkleState) GetCurrentSupply(subnetID ids.ID) (uint64, error) {
-	supply, ok := ms.supplies[subnetID]
+	supply, ok := ms.modifiedSupplies[subnetID]
 	if ok {
 		return supply, nil
 	}
@@ -608,7 +608,7 @@ func (ms *merkleState) GetCurrentSupply(subnetID ids.ID) (uint64, error) {
 }

 func (ms *merkleState) SetCurrentSupply(subnetID ids.ID, cs uint64) {
-	ms.supplies[subnetID] = cs
+	ms.modifiedSupplies[subnetID] = cs
 }

 // SUBNETS Section
@@ -1270,27 +1270,33 @@ func (ms *merkleState) writeMetadata(batchOps *[]database.BatchOp) error {
-	encodedChainTime, err := ms.chainTime.MarshalBinary()
-	if err != nil {
-		return fmt.Errorf("failed to encoding chainTime: %w", err)
-	}
+	if !ms.chainTime.Equal(ms.latestComittedChainTime) {
+		encodedChainTime, err := ms.chainTime.MarshalBinary()
+		if err != nil {
+			return fmt.Errorf("failed to encode chainTime: %w", err)
+		}

-	*batchOps = append(*batchOps, database.BatchOp{
-		Key:   merkleChainTimeKey,
-		Value: encodedChainTime,
-	})
+		*batchOps = append(*batchOps, database.BatchOp{
+			Key:   merkleChainTimeKey,
+			Value: encodedChainTime,
+		})
+		ms.latestComittedChainTime = ms.chainTime
+	}

-	*batchOps = append(*batchOps, database.BatchOp{
-		Key:   merkleLastAcceptedBlkIDKey,
-		Value: ms.lastAcceptedBlkID[:],
-	})
+	if ms.lastAcceptedBlkID != ms.latestCommittedLastAcceptedBlkID {
+		*batchOps = append(*batchOps, database.BatchOp{
+			Key:   merkleLastAcceptedBlkIDKey,
+			Value: ms.lastAcceptedBlkID[:],
+		})
+		ms.latestCommittedLastAcceptedBlkID = ms.lastAcceptedBlkID
+	}

 	// lastAcceptedBlockHeight not persisted yet in merkleDB state.
 	// TODO: Consider if it should be
-	for subnetID, supply := range ms.supplies {
 		supply := supply
-		delete(ms.supplies, subnetID)
+	for subnetID, supply := range ms.modifiedSupplies {
 		supply := supply
+		delete(ms.modifiedSupplies, subnetID) // clear up ms.modifiedSupplies to avoid potential double commits
 		ms.suppliesCache.Put(subnetID, &supply)

 		key := merkleSuppliesKey(subnetID)

diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go
index cbf24e175d0e..7942fa269c49 100644
--- a/vms/platformvm/state/merkle_state_load_ops.go
+++ b/vms/platformvm/state/merkle_state_load_ops.go
@@ -9,7 +9,6 @@ import (

 	"github.com/google/btree"

-	"github.com/ava-labs/avalanchego/database"
 	"github.com/ava-labs/avalanchego/ids"
 	"github.com/ava-labs/avalanchego/snow/validators"
 	"github.com/ava-labs/avalanchego/utils/constants"
@@ -174,6 +173,7 @@ func (ms *merkleState) loadMerkleMetadata() error {
 	if err := chainTime.UnmarshalBinary(chainTimeBytes); err != nil {
 		return err
 	}
+	ms.latestComittedChainTime = chainTime
 	ms.SetTimestamp(chainTime)

 	// load last accepted block
@@ -183,21 +183,13 @@ func (ms *merkleState) loadMerkleMetadata() error {
 	}
 	lastAcceptedBlkID := ids.Empty
 	copy(lastAcceptedBlkID[:], blkIDBytes)
+	ms.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID
 	ms.SetLastAccepted(lastAcceptedBlkID)

-	// load supplies
-	suppliedPrefix := merkleSuppliesKeyPrefix()
-	iter := ms.merkleDB.NewIteratorWithPrefix(suppliedPrefix)
-	defer iter.Release()
-	for iter.Next() {
-		_, subnetID := splitMerkleSuppliesKey(iter.Key())
-		supply, err := database.ParseUInt64(iter.Value())
-		if err != nil {
-			return err
-		}
-		ms.supplies[subnetID] = supply
-	}
-	return iter.Error()
+	// we don't need to load supplies. Unlike chainTime and lastBlkID,
+	// which track their latest committed values, we signal that supplies
+	// have not been modified by keeping the map empty.
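+	// Skipping the supplies scan also spares load() a full iteration over the
+	// merkleSuppliesPrefix keyspace at startup; supplies are instead fetched
+	// lazily, the first time GetCurrentSupply asks for them.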
+ return nil } func (ms *merkleState) loadCurrentStakers() error { From 79d1520913089570b830acb4b8d5d970a50d6f11 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 31 Aug 2023 15:29:42 +0200 Subject: [PATCH 081/132] nit --- vms/platformvm/state/merkle_state.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 512324bbf66e..911a5b2f6cac 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1253,19 +1253,19 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta return errs.Err } - if len(batchOps) != 0 { - // do commit only if there are changes to merkle state - ctx := context.TODO() - view, err := ms.merkleDB.NewView(ctx, batchOps) - if err != nil { - return fmt.Errorf("failed creating merkleDB view: %w", err) - } - if err := view.CommitToDB(ctx); err != nil { - return fmt.Errorf("failed committing merkleDB view: %w", err) - } + if len(batchOps) == 0 { + // nothing to commit + return nil } - // log whether we had changes or not + ctx := context.TODO() + view, err := ms.merkleDB.NewView(ctx, batchOps) + if err != nil { + return fmt.Errorf("failed creating merkleDB view: %w", err) + } + if err := view.CommitToDB(ctx); err != nil { + return fmt.Errorf("failed committing merkleDB view: %w", err) + } return ms.logMerkleRoot(len(batchOps) != 0) } From 7cd06060aa6d7c24eb34d33af4b9bf2d2b82ffd4 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 14 Sep 2023 09:40:54 +0200 Subject: [PATCH 082/132] appease linter --- vms/platformvm/state/stakers_properties_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/stakers_properties_test.go b/vms/platformvm/state/stakers_properties_test.go index 83b3ddb6049d..829c3c5f566b 100644 --- a/vms/platformvm/state/stakers_properties_test.go +++ b/vms/platformvm/state/stakers_properties_test.go @@ -501,7 +501,7 @@ func buildDiffOnTopOfBaseState(trackedSubnets []ids.ID) (Diff, State, error) { baseDB := versiondb.New(baseDBManager.Current().Database) baseState, err := buildChainState(baseDB, trackedSubnets) if err != nil { - return nil, nil, fmt.Errorf("unexpected error while creating chain base state, err %v", err) + return nil, nil, fmt.Errorf("unexpected error while creating chain base state, err %w", err) } genesisID := baseState.GetLastAccepted() @@ -510,7 +510,7 @@ func buildDiffOnTopOfBaseState(trackedSubnets []ids.ID) (Diff, State, error) { } diff, err := NewDiff(genesisID, versions) if err != nil { - return nil, nil, fmt.Errorf("unexpected error while creating diff, err %v", err) + return nil, nil, fmt.Errorf("unexpected error while creating diff, err %w", err) } return diff, baseState, nil } @@ -532,7 +532,7 @@ func checkStakersContent(store Stakers, stakers []*Staker, stakersType stakerSta return errors.New("Unhandled stakers status") } if err != nil { - return fmt.Errorf("unexpected failure in staker iterator creation, error %v", err) + return fmt.Errorf("unexpected failure in staker iterator creation, error %w", err) } defer it.Release() From a193dc3186f80e994018169e3954dd22938ab356 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 24 Oct 2023 15:35:10 -0400 Subject: [PATCH 083/132] lower cache sizes from 2 GiB to 512 MiB --- vms/platformvm/state/merkle_state.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go 
b/vms/platformvm/state/merkle_state.go index b7fae8e6ad3c..18b5668ae334 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -40,11 +40,11 @@ import ( ) const ( - HistoryLength = int(256) - valueNodeCacheSize = 2 * units.GiB - intermediateNodeCacheSize = 2 * units.GiB + HistoryLength = int(256) - utxoCacheSize = 8192 // from avax/utxo_state.go + valueNodeCacheSize = 512 * units.MiB + intermediateNodeCacheSize = 512 * units.MiB + utxoCacheSize = 8192 // from avax/utxo_state.go ) var ( From 59bc033e7c276c0aaedd5381a2be51663b9b40e7 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 24 Oct 2023 15:40:15 -0400 Subject: [PATCH 084/132] fix append --- vms/platformvm/state/merkle_state.go | 44 ++++++++++++++-------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 18b5668ae334..dd1565fb33d8 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -50,30 +50,30 @@ const ( var ( _ State = (*merkleState)(nil) - merkleStatePrefix = []byte{0x0} - merkleSingletonPrefix = []byte{0x1} - merkleBlockPrefix = []byte{0x2} - merkleBlockIDsPrefix = []byte{0x3} - merkleTxPrefix = []byte{0x4} - merkleIndexUTXOsPrefix = []byte{0x5} // to serve UTXOIDs(addr) - merkleUptimesPrefix = []byte{0x6} // locally measured uptimes - merkleWeightDiffPrefix = []byte{0x7} // non-merklelized validators weight diff. TODO: should we merklelize them? - merkleBlsKeyDiffPrefix = []byte{0x8} + merkleStatePrefix = []byte{0x00} + merkleSingletonPrefix = []byte{0x01} + merkleBlockPrefix = []byte{0x02} + merkleBlockIDsPrefix = []byte{0x03} + merkleTxPrefix = []byte{0x04} + merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) + merkleUptimesPrefix = []byte{0x06} // locally measured uptimes + merkleWeightDiffPrefix = []byte{0x07} // non-merklelized validators weight diff. TODO: should we merklelize them? + merkleBlsKeyDiffPrefix = []byte{0x08} // merkle db sections - metadataSectionPrefix = []byte{0x0} - merkleChainTimeKey = append(metadataSectionPrefix, []byte{0x0}...) - merkleLastAcceptedBlkIDKey = append(metadataSectionPrefix, []byte{0x1}...) - merkleSuppliesPrefix = append(metadataSectionPrefix, []byte{0x2}...) 
- - permissionedSubnetSectionPrefix = []byte{0x1} - elasticSubnetSectionPrefix = []byte{0x2} - chainsSectionPrefix = []byte{0x3} - utxosSectionPrefix = []byte{0x4} - rewardUtxosSectionPrefix = []byte{0x5} - currentStakersSectionPrefix = []byte{0x6} - pendingStakersSectionPrefix = []byte{0x7} - delegateeRewardsPrefix = []byte{0x8} + metadataSectionPrefix = byte(0x00) + merkleChainTimeKey = []byte{metadataSectionPrefix, 0x00} + merkleLastAcceptedBlkIDKey = []byte{metadataSectionPrefix, 0x01} + merkleSuppliesPrefix = []byte{metadataSectionPrefix, 0x02} + + permissionedSubnetSectionPrefix = []byte{0x01} + elasticSubnetSectionPrefix = []byte{0x02} + chainsSectionPrefix = []byte{0x03} + utxosSectionPrefix = []byte{0x04} + rewardUtxosSectionPrefix = []byte{0x05} + currentStakersSectionPrefix = []byte{0x06} + pendingStakersSectionPrefix = []byte{0x07} + delegateeRewardsPrefix = []byte{0x08} ) func NewMerkleState( From 0448de9c081e9abff534d7feede7b45c1b4e11f2 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 24 Oct 2023 15:57:54 -0400 Subject: [PATCH 085/132] getTXs --> getTxs --- vms/platformvm/state/merkle_state.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index dd1565fb33d8..decdf3b5bda1 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1068,7 +1068,7 @@ func (ms *merkleState) write(updateValidators bool, height uint64) error { errs.Add( ms.writeMerkleState(currentData, pendingData), ms.writeBlocks(), - ms.writeTXs(), + ms.writeTxs(), ms.writeLocalUptimes(), ms.writeWeightDiffs(height, weightDiffs), ms.writeBlsKeyDiffs(height, blsKeyDiffs), @@ -1511,7 +1511,7 @@ func (ms *merkleState) writeBlocks() error { return nil } -func (ms *merkleState) writeTXs() error { +func (ms *merkleState) writeTxs() error { for txID, txStatus := range ms.addedTxs { txID := txID From c2321132660ec1097056f300f0b7b0e13dac4d8e Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 24 Oct 2023 16:02:27 -0400 Subject: [PATCH 086/132] remove merkleUtxoIndexPrefix --- vms/platformvm/state/merkle_state.go | 3 ++- vms/platformvm/state/merkle_state_helpers.go | 11 +++-------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index decdf3b5bda1..4e369df56ed6 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -12,6 +12,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" @@ -491,7 +492,7 @@ func (ms *merkleState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { func (ms *merkleState) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { var ( - prefix = merkleUtxoIndexPrefix(addr) + prefix = slices.Clone(addr) key = merkleUtxoIndexKey(addr, start) ) diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 4c13f4aa48c9..6fc9c88b2d09 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -95,15 +95,10 @@ func merkleRewardUtxoIDKey(txID, utxoID ids.ID) []byte { return key } -func merkleUtxoIndexPrefix(address []byte) []byte { - prefix := make([]byte, len(address)) - copy(prefix, address) - return prefix -} - func merkleUtxoIndexKey(address []byte, utxoID ids.ID) []byte { - key := 
merkleUtxoIndexPrefix(address) - key = append(key, utxoID[:]...) + key := make([]byte, len(address)+ids.IDLen) + copy(key, address) + copy(key[len(address):], utxoID[:]) return key } From 04b63489694a0a3248d38dcf303a1051e8a8c742 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 24 Oct 2023 16:19:53 -0400 Subject: [PATCH 087/132] typo fix; cleanup context --- vms/platformvm/state/merkle_state.go | 17 +++++------- vms/platformvm/state/merkle_state_helpers.go | 27 ++++++++++---------- vms/platformvm/state/state_test.go | 2 +- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 4e369df56ed6..50648249fa37 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -88,7 +88,7 @@ func NewMerkleState( rewards reward.Calculator, bootstrapped *utils.Atomic[bool], ) (State, error) { - res, err := newMerklsState( + res, err := newMerkleState( rawDB, metrics, cfg, @@ -111,7 +111,7 @@ func NewMerkleState( return res, nil } -func newMerklsState( +func newMerkleState( rawDB database.Database, metrics metrics.Metrics, cfg *config.Config, @@ -134,13 +134,12 @@ func newMerklsState( flatValidatorPublicKeyDiffsDB = prefixdb.New(merkleBlsKeyDiffPrefix, baseDB) ) - traceCtx := context.TODO() noOpTracer, err := trace.New(trace.Config{Enabled: false}) if err != nil { return nil, fmt.Errorf("failed creating noOpTraces: %w", err) } - merkleDB, err := merkledb.New(traceCtx, baseMerkleDB, merkledb.Config{ + merkleDB, err := merkledb.New(context.TODO(), baseMerkleDB, merkledb.Config{ HistoryLength: HistoryLength, ValueNodeCacheSize: valueNodeCacheSize, IntermediateNodeCacheSize: intermediateNodeCacheSize, @@ -1262,12 +1261,11 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta return nil } - ctx := context.TODO() - view, err := ms.merkleDB.NewView(ctx, merkledb.ViewChanges{BatchOps: batchOps}) + view, err := ms.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{BatchOps: batchOps}) if err != nil { return fmt.Errorf("failed creating merkleDB view: %w", err) } - if err := view.CommitToDB(ctx); err != nil { + if err := view.CommitToDB(context.TODO()); err != nil { return fmt.Errorf("failed committing merkleDB view: %w", err) } return ms.logMerkleRoot(len(batchOps) != 0) @@ -1690,12 +1688,11 @@ func (ms *merkleState) logMerkleRoot(hasChanges bool) error { return nil } - ctx := context.TODO() - view, err := ms.merkleDB.NewView(ctx, merkledb.ViewChanges{}) + view, err := ms.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{}) if err != nil { return fmt.Errorf("failed creating merkleDB view: %w", err) } - root, err := view.GetMerkleRoot(ctx) + root, err := view.GetMerkleRoot(context.TODO()) if err != nil { return fmt.Errorf("failed pulling merkle root: %w", err) } diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 6fc9c88b2d09..2da94797ce35 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -83,9 +83,9 @@ func merkleUtxoIDKey(utxoID ids.ID) []byte { } func merkleRewardUtxosIDPrefix(txID ids.ID) []byte { - prefix := make([]byte, len(rewardUtxosSectionPrefix), len(rewardUtxosSectionPrefix)+len(txID)) + prefix := make([]byte, len(rewardUtxosSectionPrefix)+len(txID)) copy(prefix, rewardUtxosSectionPrefix) - prefix = append(prefix, txID[:]...) 
+ copy(prefix[len(rewardUtxosSectionPrefix):], txID[:]) return prefix } @@ -103,38 +103,39 @@ func merkleUtxoIndexKey(address []byte, utxoID ids.ID) []byte { } func splitUtxoIndexKey(b []byte) ([]byte, ids.ID) { - utxoID := ids.Empty - address := make([]byte, len(b)-len(utxoID)) + address := make([]byte, len(b)-ids.IDLen) copy(address, b[:len(address)]) + + utxoID := ids.Empty copy(utxoID[:], b[len(address):]) return address, utxoID } func merkleLocalUptimesKey(nodeID ids.NodeID, subnetID ids.ID) []byte { - key := make([]byte, len(nodeID), len(nodeID)+len(subnetID)) + key := make([]byte, len(nodeID)+len(subnetID)) copy(key, nodeID[:]) - key = append(key, subnetID[:]...) + copy(key[ids.NodeIDLen:], subnetID[:]) return key } func merkleCurrentStakersKey(txID ids.ID) []byte { - key := make([]byte, len(currentStakersSectionPrefix), len(currentStakersSectionPrefix)+len(txID)) + key := make([]byte, len(currentStakersSectionPrefix)+len(txID)) copy(key, currentStakersSectionPrefix) - key = append(key, txID[:]...) + copy(key[len(currentStakersSectionPrefix):], txID[:]) return key } func merklePendingStakersKey(txID ids.ID) []byte { - key := make([]byte, len(pendingStakersSectionPrefix), len(pendingStakersSectionPrefix)+len(txID)) + key := make([]byte, len(pendingStakersSectionPrefix)+len(txID)) copy(key, pendingStakersSectionPrefix) - key = append(key, txID[:]...) + copy(key[len(pendingStakersSectionPrefix):], txID[:]) return key } func merkleDelegateeRewardsKey(nodeID ids.NodeID, subnetID ids.ID) []byte { - key := make([]byte, len(delegateeRewardsPrefix), len(delegateeRewardsPrefix)+len(nodeID)+len(subnetID)) + key := make([]byte, len(delegateeRewardsPrefix)+len(nodeID)+len(subnetID)) copy(key, delegateeRewardsPrefix) - key = append(key, nodeID[:]...) - key = append(key, subnetID[:]...) + copy(key[len(delegateeRewardsPrefix):], nodeID[:]) + copy(key[len(delegateeRewardsPrefix)+ids.NodeIDLen:], subnetID[:]) return key } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 7f0af2c18287..8776c0767782 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -164,7 +164,7 @@ func newStateFromDB(require *require.Assertions, db database.Database) State { _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) execCfg, _ := config.GetExecutionConfig(nil) - state, err := newMerklsState( + state, err := newMerkleState( db, metrics.Noop, &config.Config{ From 6a22584c6661f75efdedf85e582ab09cf4ad6e20 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 24 Oct 2023 16:34:49 -0400 Subject: [PATCH 088/132] reduce allocations --- vms/platformvm/state/merkle_state_helpers.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 2da94797ce35..2f9439bfdb83 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -28,15 +28,10 @@ type weightDiffKey struct { nodeID ids.NodeID } -func merkleSuppliesKeyPrefix() []byte { - prefix := make([]byte, len(merkleSuppliesPrefix)) - copy(prefix, merkleSuppliesPrefix) - return prefix -} - func merkleSuppliesKey(subnetID ids.ID) []byte { - key := merkleSuppliesKeyPrefix() - key = append(key, subnetID[:]...) 
+ key := make([]byte, len(merkleSuppliesPrefix)+ids.IDLen) + copy(key, merkleSuppliesPrefix) + copy(key[len(merkleSuppliesPrefix):], subnetID[:]) return key } From 801e2eae56f0d46c6730618326aff5667c0acec6 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 25 Oct 2023 12:19:53 +0200 Subject: [PATCH 089/132] fixed merge --- vms/platformvm/state/stakers_helpers_test.go | 5 +---- vms/platformvm/state/stakers_model_storage_test.go | 11 +++++------ 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go index a12171251d78..f20b55bc7110 100644 --- a/vms/platformvm/state/stakers_helpers_test.go +++ b/vms/platformvm/state/stakers_helpers_test.go @@ -92,13 +92,10 @@ func buildChainState(baseDB database.Database, trackedSubnets []ids.ID) (State, } func defaultConfig() *config.Config { - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) return &config.Config{ Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: vdrs, + Validators: validators.NewManager(), TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, CreateBlockchainTxFee: 100 * defaultTxFee, diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go index e17fbe96ac4c..10e3296d91bf 100644 --- a/vms/platformvm/state/stakers_model_storage_test.go +++ b/vms/platformvm/state/stakers_model_storage_test.go @@ -901,13 +901,12 @@ func checkValidatorSetContent(res commands.Result) bool { sysIt.Release() for subnetID, nodes := range valContent { - vals, found := valSet.Get(subnetID) - if !found { - return false - } for nodeID, weight := range nodes { - valWeight := vals.GetWeight(nodeID) - if weight != valWeight { + val, found := valSet.GetValidator(subnetID, nodeID) + if !found { + return false + } + if weight != val.Weight { return false } } From 98fed7b53546a0aafc85958226cc56ee6503f239 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 27 Oct 2023 14:08:37 -0400 Subject: [PATCH 090/132] appease linter --- vms/platformvm/state/merkle_state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index bd692086dcfc..6c1cf7dbaecf 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1396,7 +1396,7 @@ func (ms *merkleState) writePermissionedSubnets(batchOps *[]database.BatchOp) er return nil } -func (ms *merkleState) writeSubnetOwners(batchOps *[]database.BatchOp) error { //nolint:golint,unparam +func (ms *merkleState) writeSubnetOwners(batchOps *[]database.BatchOp) error { for subnetID, owner := range ms.subnetOwners { subnetID := subnetID owner := owner From 50bd0a57094bc086d526b516e5bd1de2d9a7d2be Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 27 Oct 2023 14:27:33 -0400 Subject: [PATCH 091/132] fix getSubnetOwners --- vms/platformvm/state/merkle_state.go | 31 ++++++++++---------- vms/platformvm/state/merkle_state_helpers.go | 7 +++++ 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 6c1cf7dbaecf..a43521fafd7f 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -12,6 +12,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "golang.org/x/exp/maps" 
"golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/cache" @@ -52,16 +53,15 @@ const ( var ( _ State = (*merkleState)(nil) - merkleStatePrefix = []byte{0x00} - merkleSingletonPrefix = []byte{0x01} - merkleBlockPrefix = []byte{0x02} - merkleBlockIDsPrefix = []byte{0x03} - merkleTxPrefix = []byte{0x04} - merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) - merkleUptimesPrefix = []byte{0x06} // locally measured uptimes - merkleWeightDiffPrefix = []byte{0x07} // non-merklelized validators weight diff. TODO: should we merklelize them? - merkleBlsKeyDiffPrefix = []byte{0x08} - merkleSubnetOwnerPrefix = []byte{0x09} + merkleStatePrefix = []byte{0x00} + merkleSingletonPrefix = []byte{0x01} + merkleBlockPrefix = []byte{0x02} + merkleBlockIDsPrefix = []byte{0x03} + merkleTxPrefix = []byte{0x04} + merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) + merkleUptimesPrefix = []byte{0x06} // locally measured uptimes + merkleWeightDiffPrefix = []byte{0x07} // non-merklelized validators weight diff. TODO: should we merklelize them? + merkleBlsKeyDiffPrefix = []byte{0x08} // merkle db sections metadataSectionPrefix = byte(0x00) @@ -77,6 +77,7 @@ var ( currentStakersSectionPrefix = []byte{0x06} pendingStakersSectionPrefix = []byte{0x07} delegateeRewardsPrefix = []byte{0x08} + subnetOwnersPrefix = []byte{0x09} ) func NewMerkleState( @@ -134,7 +135,6 @@ func newMerkleState( localUptimesDB = prefixdb.New(merkleUptimesPrefix, baseDB) flatValidatorWeightDiffsDB = prefixdb.New(merkleWeightDiffPrefix, baseDB) flatValidatorPublicKeyDiffsDB = prefixdb.New(merkleBlsKeyDiffPrefix, baseDB) - subnetOwnerDB = prefixdb.New(merkleSubnetOwnerPrefix, baseDB) ) noOpTracer, err := trace.New(trace.Config{Enabled: false}) @@ -254,7 +254,6 @@ func newMerkleState( suppliesCache: suppliesCache, subnetOwners: make(map[ids.ID]fx.Owner), - subnetOwnerDB: subnetOwnerDB, subnetOwnerCache: subnetOwnerCache, addedPermissionedSubnets: make([]*txs.Tx, 0), @@ -326,7 +325,6 @@ type merkleState struct { // Subnet ID --> Owner of the subnet subnetOwners map[ids.ID]fx.Owner subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database - subnetOwnerDB database.Database addedPermissionedSubnets []*txs.Tx // added SubnetTxs, waiting to be committed permissionedSubnetCache []*txs.Tx // nil if the subnets haven't been loaded @@ -682,7 +680,8 @@ func (ms *merkleState) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { return ownerAndSize.owner, nil } - ownerBytes, err := ms.subnetOwnerDB.Get(subnetID[:]) + subnetIDKey := merkleSubnetOwnersKey(subnetID) + ownerBytes, err := ms.merkleDB.Get(subnetIDKey[:]) if err == nil { var owner fx.Owner if _, err := block.GenesisCodec.Unmarshal(ownerBytes, &owner); err != nil { @@ -1412,13 +1411,13 @@ func (ms *merkleState) writeSubnetOwners(batchOps *[]database.BatchOp) error { size: len(ownerBytes), }) - key := merklePermissionedSubnetKey(subnetID) + key := merkleSubnetOwnersKey(subnetID) *batchOps = append(*batchOps, database.BatchOp{ Key: key, Value: ownerBytes, }) } - ms.addedPermissionedSubnets = make([]*txs.Tx, 0) + maps.Clear(ms.subnetOwners) return nil } diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 2f9439bfdb83..5f4ea3ceeb84 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -134,3 +134,10 @@ func merkleDelegateeRewardsKey(nodeID ids.NodeID, subnetID ids.ID) []byte { 
 	copy(key[len(delegateeRewardsPrefix)+ids.NodeIDLen:], subnetID[:])
 	return key
 }
+
+func merkleSubnetOwnersKey(subnetID ids.ID) []byte {
+	key := make([]byte, len(subnetOwnersPrefix)+len(subnetID))
+	copy(key, subnetOwnersPrefix)
+	copy(key[len(subnetOwnersPrefix):], subnetID[:])
+	return key
+}

From 926ef62870772ede96d1966c9c6158542ae55456 Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Fri, 27 Oct 2023 14:38:03 -0400
Subject: [PATCH 092/132] appease linter

---
 vms/platformvm/state/merkle_state.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go
index a43521fafd7f..9cab7dc31857 100644
--- a/vms/platformvm/state/merkle_state.go
+++ b/vms/platformvm/state/merkle_state.go
@@ -681,7 +681,7 @@ func (ms *merkleState) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) {
 	}

 	subnetIDKey := merkleSubnetOwnersKey(subnetID)
-	ownerBytes, err := ms.merkleDB.Get(subnetIDKey[:])
+	ownerBytes, err := ms.merkleDB.Get(subnetIDKey)
 	if err == nil {
 		var owner fx.Owner
 		if _, err := block.GenesisCodec.Unmarshal(ownerBytes, &owner); err != nil {

From 050d6bb5f8a472ae76b744ed91702f0d25683daf Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Fri, 27 Oct 2023 15:09:37 -0400
Subject: [PATCH 093/132] remove unneeded copy

---
 vms/platformvm/state/merkle_state.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go
index 9cab7dc31857..54f938cf52bf 100644
--- a/vms/platformvm/state/merkle_state.go
+++ b/vms/platformvm/state/merkle_state.go
@@ -1397,9 +1397,7 @@ func (ms *merkleState) writePermissionedSubnets(batchOps *[]database.BatchOp) er

 func (ms *merkleState) writeSubnetOwners(batchOps *[]database.BatchOp) error {
 	for subnetID, owner := range ms.subnetOwners {
-		subnetID := subnetID
 		owner := owner
-		delete(ms.subnetOwners, subnetID)

 		ownerBytes, err := block.GenesisCodec.Marshal(block.Version, &owner)
 		if err != nil {

From a0f8e12f0c459ef72d889ffd3c19624523f7534e Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Fri, 27 Oct 2023 15:30:53 -0400
Subject: [PATCH 094/132] bump timeout

---
 scripts/build_test.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/scripts/build_test.sh b/scripts/build_test.sh
index 66ddd6b548e4..dc690ef8f20a 100755
--- a/scripts/build_test.sh
+++ b/scripts/build_test.sh
@@ -8,4 +8,5 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd )
 source "$AVALANCHE_PATH"/scripts/constants.sh

 # Ensure execution of fixture unit tests under tests/ but exclude ginkgo tests in tests/e2e and tests/upgrade
-go test -shuffle=on -race -timeout=${TIMEOUT:-"120s"} -coverprofile="coverage.out" -covermode="atomic" $(go list ./... | grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade)
+# TODO put time back to 120s. Changed to 240s for testing.
+go test -shuffle=on -race -timeout=${TIMEOUT:-"240s"} -coverprofile="coverage.out" -covermode="atomic" $(go list ./... 
| grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade) From 25d68878628ac0c107458fb3019d9e9551452bb6 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 27 Oct 2023 22:47:45 +0200 Subject: [PATCH 095/132] moved reward UTXOs out of merkleDB section --- vms/platformvm/state/merkle_state.go | 159 ++++++++++--------- vms/platformvm/state/merkle_state_helpers.go | 13 -- vms/platformvm/state/merkle_state_test.go | 17 -- 3 files changed, 83 insertions(+), 106 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 54f938cf52bf..1fd29660f34b 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -53,15 +54,16 @@ const ( var ( _ State = (*merkleState)(nil) - merkleStatePrefix = []byte{0x00} - merkleSingletonPrefix = []byte{0x01} - merkleBlockPrefix = []byte{0x02} - merkleBlockIDsPrefix = []byte{0x03} - merkleTxPrefix = []byte{0x04} - merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) - merkleUptimesPrefix = []byte{0x06} // locally measured uptimes - merkleWeightDiffPrefix = []byte{0x07} // non-merklelized validators weight diff. TODO: should we merklelize them? - merkleBlsKeyDiffPrefix = []byte{0x08} + merkleStatePrefix = []byte{0x00} + merkleSingletonPrefix = []byte{0x01} + merkleBlockPrefix = []byte{0x02} + merkleBlockIDsPrefix = []byte{0x03} + merkleTxPrefix = []byte{0x04} + merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) + merkleUptimesPrefix = []byte{0x06} // locally measured uptimes + merkleWeightDiffPrefix = []byte{0x07} // non-merklelized validators weight diff. TODO: should we merklelize them? 
+ merkleBlsKeyDiffPrefix = []byte{0x08} + merkleRewardUtxosPrefix = []byte{0x09} // merkle db sections metadataSectionPrefix = byte(0x00) @@ -73,11 +75,10 @@ var ( elasticSubnetSectionPrefix = []byte{0x02} chainsSectionPrefix = []byte{0x03} utxosSectionPrefix = []byte{0x04} - rewardUtxosSectionPrefix = []byte{0x05} - currentStakersSectionPrefix = []byte{0x06} - pendingStakersSectionPrefix = []byte{0x07} - delegateeRewardsPrefix = []byte{0x08} - subnetOwnersPrefix = []byte{0x09} + currentStakersSectionPrefix = []byte{0x05} + pendingStakersSectionPrefix = []byte{0x06} + delegateeRewardsPrefix = []byte{0x07} + subnetOwnersPrefix = []byte{0x08} ) func NewMerkleState( @@ -135,6 +136,7 @@ func newMerkleState( localUptimesDB = prefixdb.New(merkleUptimesPrefix, baseDB) flatValidatorWeightDiffsDB = prefixdb.New(merkleWeightDiffPrefix, baseDB) flatValidatorPublicKeyDiffsDB = prefixdb.New(merkleBlsKeyDiffPrefix, baseDB) + rewardUTXOsDB = prefixdb.New(merkleRewardUtxosPrefix, baseDB) ) noOpTracer, err := trace.New(trace.Config{Enabled: false}) @@ -245,10 +247,8 @@ func newMerkleState( delegateeRewardCache: make(map[ids.NodeID]map[ids.ID]uint64), modifiedDelegateeReward: make(map[ids.NodeID]set.Set[ids.ID]), - modifiedUTXOs: make(map[ids.ID]*avax.UTXO), - utxoCache: &cache.LRU[ids.ID, *avax.UTXO]{Size: utxoCacheSize}, - addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), - rewardUTXOsCache: rewardUTXOsCache, + modifiedUTXOs: make(map[ids.ID]*avax.UTXO), + utxoCache: &cache.LRU[ids.ID, *avax.UTXO]{Size: utxoCacheSize}, modifiedSupplies: make(map[ids.ID]uint64), suppliesCache: suppliesCache, @@ -284,6 +284,10 @@ func newMerkleState( flatValidatorWeightDiffsDB: flatValidatorWeightDiffsDB, flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, + + addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), + rewardUTXOsCache: rewardUTXOsCache, + rewardUTXOsDB: rewardUTXOsDB, }, nil } @@ -311,9 +315,6 @@ type merkleState struct { modifiedUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO utxoCache cache.Cacher[ids.ID, *avax.UTXO] // UTXO ID -> *UTXO. 
If the *UTXO is nil the UTXO doesn't exist - addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO - rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO - // Metadata section chainTime, latestComittedChainTime time.Time lastAcceptedBlkID, latestCommittedLastAcceptedBlkID ids.ID @@ -360,6 +361,11 @@ type merkleState struct { flatValidatorWeightDiffsDB database.Database flatValidatorPublicKeyDiffsDB database.Database + + // Reward UTXOs section + addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO + rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO + rewardUTXOsDB database.Database } // STAKERS section @@ -544,40 +550,6 @@ func (ms *merkleState) DeleteUTXO(utxoID ids.ID) { ms.modifiedUTXOs[utxoID] = nil } -func (ms *merkleState) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { - if utxos, exists := ms.addedRewardUTXOs[txID]; exists { - return utxos, nil - } - if utxos, exists := ms.rewardUTXOsCache.Get(txID); exists { - return utxos, nil - } - - utxos := make([]*avax.UTXO, 0) - - prefix := merkleRewardUtxosIDPrefix(txID) - - it := ms.merkleDB.NewIteratorWithPrefix(prefix) - defer it.Release() - - for it.Next() { - utxo := &avax.UTXO{} - if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil { - return nil, err - } - utxos = append(utxos, utxo) - } - if err := it.Error(); err != nil { - return nil, err - } - - ms.rewardUTXOsCache.Put(txID, utxos) - return utxos, nil -} - -func (ms *merkleState) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { - ms.addedRewardUTXOs[txID] = append(ms.addedRewardUTXOs[txID], utxo) -} - // METADATA Section func (ms *merkleState) GetTimestamp() time.Time { return ms.chainTime @@ -975,6 +947,40 @@ func (ms *merkleState) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Ti return staker.StartTime, nil } +// REWARD UTXOs SECTION +func (ms *merkleState) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { + if utxos, exists := ms.addedRewardUTXOs[txID]; exists { + return utxos, nil + } + if utxos, exists := ms.rewardUTXOsCache.Get(txID); exists { + return utxos, nil + } + + rawTxDB := prefixdb.New(txID[:], ms.rewardUTXOsDB) + txDB := linkeddb.NewDefault(rawTxDB) + it := txDB.NewIterator() + defer it.Release() + + utxos := []*avax.UTXO(nil) + for it.Next() { + utxo := &avax.UTXO{} + if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil { + return nil, err + } + utxos = append(utxos, utxo) + } + if err := it.Error(); err != nil { + return nil, err + } + + ms.rewardUTXOsCache.Put(txID, utxos) + return utxos, nil +} + +func (ms *merkleState) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { + ms.addedRewardUTXOs[txID] = append(ms.addedRewardUTXOs[txID], utxo) +} + // VALIDATORS Section func (ms *merkleState) ApplyCurrentValidators(subnetID ids.ID, vdrs validators.Manager) error { for nodeID, validator := range ms.currentStakers.validators[subnetID] { @@ -1144,6 +1150,7 @@ func (ms *merkleState) write(updateValidators bool, height uint64) error { ms.writeLocalUptimes(), ms.writeWeightDiffs(height, weightDiffs), ms.writeBlsKeyDiffs(height, blsKeyDiffs), + ms.writeRewardUTXOs(), ms.updateValidatorSet(updateValidators, valSetDiff, weightDiffs), ) return errs.Err @@ -1323,7 +1330,6 @@ func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*sta ms.writePendingStakers(&batchOps, pendingData), ms.writeDelegateeRewards(&batchOps), ms.writeUTXOs(&batchOps), - ms.writeRewardUTXOs(&batchOps), ) if errs.Err != nil { return errs.Err @@ -1544,26 +1550,6 @@ func (ms *merkleState) 
writeUTXOs(batchOps *[]database.BatchOp) error { return nil } -func (ms *merkleState) writeRewardUTXOs(batchOps *[]database.BatchOp) error { - for txID, utxos := range ms.addedRewardUTXOs { - delete(ms.addedRewardUTXOs, txID) - ms.rewardUTXOsCache.Put(txID, utxos) - for _, utxo := range utxos { - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) - if err != nil { - return fmt.Errorf("failed to serialize reward UTXO: %w", err) - } - - key := merkleRewardUtxoIDKey(txID, utxo.InputID()) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: utxoBytes, - }) - } - } - return nil -} - func (ms *merkleState) writeDelegateeRewards(batchOps *[]database.BatchOp) error { //nolint:golint,unparam for nodeID, nodeDelegateeRewards := range ms.modifiedDelegateeReward { nodeDelegateeRewardsList := nodeDelegateeRewards.List() @@ -1711,6 +1697,27 @@ func (ms *merkleState) writeBlsKeyDiffs(height uint64, blsKeyDiffs map[ids.NodeI return nil } +func (ms *merkleState) writeRewardUTXOs() error { + for txID, utxos := range ms.addedRewardUTXOs { + delete(ms.addedRewardUTXOs, txID) + ms.rewardUTXOsCache.Put(txID, utxos) + rawTxDB := prefixdb.New(txID[:], ms.rewardUTXOsDB) + txDB := linkeddb.NewDefault(rawTxDB) + + for _, utxo := range utxos { + utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + if err != nil { + return fmt.Errorf("failed to serialize reward UTXO: %w", err) + } + utxoID := utxo.InputID() + if err := txDB.Put(utxoID[:], utxoBytes); err != nil { + return fmt.Errorf("failed to add reward UTXO: %w", err) + } + } + } + return nil +} + func (ms *merkleState) updateValidatorSet( updateValidators bool, valSetDiff map[weightDiffKey]*diffValidator, diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/merkle_state_helpers.go index 5f4ea3ceeb84..cf8b165e2698 100644 --- a/vms/platformvm/state/merkle_state_helpers.go +++ b/vms/platformvm/state/merkle_state_helpers.go @@ -77,19 +77,6 @@ func merkleUtxoIDKey(utxoID ids.ID) []byte { return key } -func merkleRewardUtxosIDPrefix(txID ids.ID) []byte { - prefix := make([]byte, len(rewardUtxosSectionPrefix)+len(txID)) - copy(prefix, rewardUtxosSectionPrefix) - copy(prefix[len(rewardUtxosSectionPrefix):], txID[:]) - return prefix -} - -func merkleRewardUtxoIDKey(txID, utxoID ids.ID) []byte { - key := merkleRewardUtxosIDPrefix(txID) - key = append(key, utxoID[:]...) 
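// (on the two key helpers being deleted here: the copy/append steps build the
// old flat layout, key = rewardUtxosSectionPrefix | txID | utxoID, with every
// reward UTXO stored as a single entry of the shared merkle DB. The new
// GetRewardUTXOs/writeRewardUTXOs in this patch replace that with a
// per-transaction bucket, rawTxDB := prefixdb.New(txID[:], ms.rewardUTXOsDB),
// wrapped in linkeddb.NewDefault(rawTxDB), so one transaction's reward UTXOs
// can be iterated directly instead of prefix-scanning the merkle trie.)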
- return key -} - func merkleUtxoIndexKey(address []byte, utxoID ids.ID) []byte { key := make([]byte, len(address)+ids.IDLen) copy(key, address) diff --git a/vms/platformvm/state/merkle_state_test.go b/vms/platformvm/state/merkle_state_test.go index 2a70cb81916b..00547c9c8d93 100644 --- a/vms/platformvm/state/merkle_state_test.go +++ b/vms/platformvm/state/merkle_state_test.go @@ -76,23 +76,6 @@ func TestUtxoIDKey(t *testing.T) { require.Equal(utxoID[:], key[len(prefix):]) } -func TestRewardUtxoKey(t *testing.T) { - require := require.New(t) - txID := ids.GenerateTestID() - utxoID := ids.GenerateTestID() - prefix := rewardUtxosSectionPrefix - - keyPrefix := merkleRewardUtxosIDPrefix(txID) - key := merkleRewardUtxoIDKey(txID, utxoID) - - require.Len(keyPrefix, len(prefix)+len(txID[:])) - require.Equal(prefix, key[0:len(prefix)]) - require.Equal(txID[:], keyPrefix[len(prefix):]) - - require.Len(key, len(keyPrefix)+len(utxoID[:])) - require.Equal(utxoID[:], key[len(keyPrefix):]) -} - func TestUtxosIndexKey(t *testing.T) { require := require.New(t) utxoID := ids.GenerateTestID() From 6e5c8aadb15c879711895bff58dc8917a83f3ffb Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Mon, 30 Oct 2023 08:55:43 +0100 Subject: [PATCH 096/132] used utils.Err --- vms/platformvm/state/merkle_state.go | 21 ++++++------------- vms/platformvm/state/merkle_state_load_ops.go | 6 ++---- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 1fd29660f34b..6858d553220f 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -31,7 +31,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" @@ -1117,8 +1116,7 @@ func (*merkleState) Checksum() ids.ID { } func (ms *merkleState) Close() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( ms.flatValidatorWeightDiffsDB.Close(), ms.flatValidatorPublicKeyDiffsDB.Close(), ms.localUptimesDB.Close(), @@ -1129,7 +1127,6 @@ func (ms *merkleState) Close() error { ms.merkleDB.Close(), ms.baseMerkleDB.Close(), ) - return errs.Err } func (ms *merkleState) write(updateValidators bool, height uint64) error { @@ -1142,8 +1139,7 @@ func (ms *merkleState) write(updateValidators bool, height uint64) error { return err } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( ms.writeMerkleState(currentData, pendingData), ms.writeBlocks(), ms.writeTxs(), @@ -1153,7 +1149,6 @@ func (ms *merkleState) write(updateValidators bool, height uint64) error { ms.writeRewardUTXOs(), ms.updateValidatorSet(updateValidators, valSetDiff, weightDiffs), ) - return errs.Err } func (ms *merkleState) processCurrentStakers() ( @@ -1315,12 +1310,8 @@ func (ms *merkleState) processPendingStakers() (map[ids.ID]*stakersData, error) } func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*stakersData) error { - var ( - errs = wrappers.Errs{} - batchOps = make([]database.BatchOp, 0) - ) - - errs.Add( + batchOps := make([]database.BatchOp, 0) + err := utils.Err( ms.writeMetadata(&batchOps), ms.writePermissionedSubnets(&batchOps), ms.writeSubnetOwners(&batchOps), @@ -1331,8 +1322,8 @@ func (ms *merkleState) writeMerkleState(currentData, 
pendingData map[ids.ID]*sta ms.writeDelegateeRewards(&batchOps), ms.writeUTXOs(&batchOps), ) - if errs.Err != nil { - return errs.Err + if err != nil { + return err } if len(batchOps) == 0 { diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go index 7f1e97771038..62e243b44311 100644 --- a/vms/platformvm/state/merkle_state_load_ops.go +++ b/vms/platformvm/state/merkle_state_load_ops.go @@ -11,10 +11,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" @@ -152,8 +152,7 @@ func (ms *merkleState) syncGenesis(genesisBlk block.Block, genesis *genesis.Gene // Load pulls data previously stored on disk that is expected to be in memory. func (ms *merkleState) load(hasSynced bool) error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( ms.loadMerkleMetadata(), ms.loadCurrentStakers(), ms.loadPendingStakers(), @@ -161,7 +160,6 @@ func (ms *merkleState) load(hasSynced bool) error { ms.logMerkleRoot(!hasSynced), // we already logged if sync has happened ) - return errs.Err } func (ms *merkleState) loadMerkleMetadata() error { From 170cfb4b3ad5afbf320142ebfc98aff5ca2ce991 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Mon, 6 Nov 2023 10:01:08 +0100 Subject: [PATCH 097/132] rebased on dev --- scripts/build_test.sh | 3 +- vms/platformvm/block/executor/acceptor.go | 19 +- vms/platformvm/state/diff_test.go | 27 +- vms/platformvm/state/stakers.go | 3 - vms/platformvm/state/stakers_helpers_test.go | 142 --- .../stakers_model_generator_check_test.go | 243 ----- .../state/stakers_model_generator_test.go | 405 ------- vms/platformvm/state/stakers_model_storage.go | 248 ----- .../state/stakers_model_storage_test.go | 994 ------------------ .../state/stakers_properties_test.go | 568 ---------- 10 files changed, 22 insertions(+), 2630 deletions(-) delete mode 100644 vms/platformvm/state/stakers_helpers_test.go delete mode 100644 vms/platformvm/state/stakers_model_generator_check_test.go delete mode 100644 vms/platformvm/state/stakers_model_generator_test.go delete mode 100644 vms/platformvm/state/stakers_model_storage.go delete mode 100644 vms/platformvm/state/stakers_model_storage_test.go delete mode 100644 vms/platformvm/state/stakers_properties_test.go diff --git a/scripts/build_test.sh b/scripts/build_test.sh index dc690ef8f20a..66ddd6b548e4 100755 --- a/scripts/build_test.sh +++ b/scripts/build_test.sh @@ -8,5 +8,4 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) source "$AVALANCHE_PATH"/scripts/constants.sh # Ensure execution of fixture unit tests under tests/ but exclude ginkgo tests in tests/e2e and tests/upgrade -# TODO put time back to 120s. Changed to 240s for testing. -go test -shuffle=on -race -timeout=${TIMEOUT:-"240s"} -coverprofile="coverage.out" -covermode="atomic" $(go list ./... | grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade) +go test -shuffle=on -race -timeout=${TIMEOUT:-"120s"} -coverprofile="coverage.out" -covermode="atomic" $(go list ./... 
| grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade) diff --git a/vms/platformvm/block/executor/acceptor.go b/vms/platformvm/block/executor/acceptor.go index 941dbff2d17c..c66440aa83fe 100644 --- a/vms/platformvm/block/executor/acceptor.go +++ b/vms/platformvm/block/executor/acceptor.go @@ -81,11 +81,7 @@ func (a *acceptor) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { // Update the state to reflect the changes made in [onAcceptState]. if err := blkState.onAcceptState.Apply(a.state); err != nil { - return fmt.Errorf( - "failed to apply accept state for block %s: %w", - blkID, - err, - ) + return err } defer a.state.Abort() @@ -180,13 +176,8 @@ func (a *acceptor) optionBlock(b, parent block.Block, blockType string) error { if !ok { return fmt.Errorf("%w %s", errMissingBlockState, blkID) } - if err := blkState.onAcceptState.Apply(a.state); err != nil { - return fmt.Errorf( - "failed to apply accept state for block %s: %w", - blkID, - err, - ) + return err } if err := a.state.Commit(); err != nil { @@ -250,11 +241,7 @@ func (a *acceptor) standardBlock(b block.Block, blockType string) error { // Update the state to reflect the changes made in [onAcceptState]. if err := blkState.onAcceptState.Apply(a.state); err != nil { - return fmt.Errorf( - "failed to apply accept state for block %s: %w", - blkID, - err, - ) + return err } defer a.state.Abort() diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index b2e3f113fc47..c35fb925594b 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -80,7 +80,8 @@ func TestDiffCurrentValidator(t *testing.T) { lastAcceptedID := ids.GenerateTestID() state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() @@ -116,7 +117,8 @@ func TestDiffPendingValidator(t *testing.T) { lastAcceptedID := ids.GenerateTestID() state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() @@ -157,7 +159,8 @@ func TestDiffCurrentDelegator(t *testing.T) { } state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -205,7 +208,8 @@ func TestDiffPendingDelegator(t *testing.T) { } state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -247,7 +251,8 @@ func TestDiffSubnet(t *testing.T) { ctrl := gomock.NewController(t) state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -284,7 +289,8 @@ func TestDiffChain(t *testing.T) { ctrl := gomock.NewController(t) state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in 
NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -322,7 +328,8 @@ func TestDiffTx(t *testing.T) { ctrl := gomock.NewController(t) state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -371,7 +378,8 @@ func TestDiffRewardUTXO(t *testing.T) { ctrl := gomock.NewController(t) state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -415,7 +423,8 @@ func TestDiffUTXO(t *testing.T) { ctrl := gomock.NewController(t) state := NewMockState(ctrl) - state.EXPECT().GetTimestamp().Return(time.Now()) // Called in NewDiff + // Called in NewDiff + state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() diff --git a/vms/platformvm/state/stakers.go b/vms/platformvm/state/stakers.go index 2113979cb42f..5276ff4f8204 100644 --- a/vms/platformvm/state/stakers.go +++ b/vms/platformvm/state/stakers.go @@ -297,7 +297,6 @@ func (s *diffStakers) PutValidator(staker *Staker) { s.addedStakers = btree.NewG(defaultTreeDegree, (*Staker).Less) } s.addedStakers.ReplaceOrInsert(staker) - delete(s.deletedStakers, staker.TxID) } func (s *diffStakers) DeleteValidator(staker *Staker) { @@ -349,13 +348,11 @@ func (s *diffStakers) PutDelegator(staker *Staker) { validatorDiff.addedDelegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validatorDiff.addedDelegators.ReplaceOrInsert(staker) - delete(validatorDiff.deletedDelegators, staker.TxID) if s.addedStakers == nil { s.addedStakers = btree.NewG(defaultTreeDegree, (*Staker).Less) } s.addedStakers.ReplaceOrInsert(staker) - delete(s.deletedStakers, staker.TxID) } func (s *diffStakers) DeleteDelegator(staker *Staker) { diff --git a/vms/platformvm/state/stakers_helpers_test.go b/vms/platformvm/state/stakers_helpers_test.go deleted file mode 100644 index 4412589b68f6..000000000000 --- a/vms/platformvm/state/stakers_helpers_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
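One behavioral note on the stakers.go hunk above: PutValidator and PutDelegator no longer clear a staker's TxID from the deleted set. A sketch of the changed semantics (assuming, since that part of diffStakers is not shown in this hunk, that DeleteValidator records the TxID in deletedStakers):

	d.DeleteValidator(staker) // staker.TxID lands in deletedStakers
	d.PutValidator(staker)    // before this patch: also removed the TxID from
	                          // deletedStakers; now the staker is re-added to
	                          // addedStakers while still marked deleted, so
	                          // delete-then-put of one staker inside a single
	                          // diff is evidently assumed never to happen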
- -package state - -import ( - "fmt" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/chains" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/uptime" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/platformvm/api" - "github.com/ava-labs/avalanchego/vms/platformvm/config" - "github.com/ava-labs/avalanchego/vms/platformvm/metrics" - "github.com/ava-labs/avalanchego/vms/platformvm/reward" -) - -var ( - _ Versions = (*versionsHolder)(nil) - - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) - avaxAssetID = ids.ID{'y', 'e', 'e', 't'} - - defaultMinStakingDuration = 24 * time.Hour - defaultMaxStakingDuration = 365 * 24 * time.Hour - defaultGenesisTime = time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC) - defaultValidateStartTime = defaultGenesisTime - defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) - defaultTxFee = uint64(100) -) - -type stakerStatus int - -type versionsHolder struct { - baseState State -} - -func (h *versionsHolder) GetState(blkID ids.ID) (Chain, bool) { - return h.baseState, blkID == h.baseState.GetLastAccepted() -} - -func buildStateCtx() *snow.Context { - ctx := snow.DefaultContextTest() - ctx.NetworkID = constants.UnitTestID - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - - return ctx -} - -func buildChainState(baseDB database.Database, trackedSubnets []ids.ID) (State, error) { - cfg := defaultConfig() - cfg.TrackedSubnets.Add(trackedSubnets...) - - execConfig, err := config.GetExecutionConfig(nil) - if err != nil { - return nil, err - } - - ctx := buildStateCtx() - - genesisBytes, err := buildGenesisTest(ctx) - if err != nil { - return nil, err - } - - rewardsCalc := reward.NewCalculator(cfg.RewardConfig) - return NewMerkleState( - baseDB, - genesisBytes, - prometheus.NewRegistry(), - cfg, - execConfig, - ctx, - metrics.Noop, - rewardsCalc, - ) -} - -func defaultConfig() *config.Config { - return &config.Config{ - Chains: chains.TestManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: 5 * units.MilliAvax, - MaxValidatorStake: 500 * units.MilliAvax, - MinDelegatorStake: 1 * units.MilliAvax, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: reward.Config{ - MaxConsumptionRate: .12 * reward.PercentDenominator, - MinConsumptionRate: .10 * reward.PercentDenominator, - MintingPeriod: defaultMaxStakingDuration, - SupplyCap: 720 * units.MegaAvax, - }, - ApricotPhase3Time: defaultValidateEndTime, - ApricotPhase5Time: defaultValidateEndTime, - BanffTime: defaultValidateEndTime, - CortinaTime: defaultValidateEndTime, - } -} - -func buildGenesisTest(ctx *snow.Context) ([]byte, error) { - buildGenesisArgs := api.BuildGenesisArgs{ - NetworkID: json.Uint32(constants.UnitTestID), - AvaxAssetID: ctx.AVAXAssetID, - UTXOs: nil, // no UTXOs in this genesis. Not relevant to package tests. - Validators: nil, // no validators in this genesis. Tests will handle them. 
- Chains: nil, - Time: json.Uint64(defaultGenesisTime.Unix()), - InitialSupply: json.Uint64(360 * units.MegaAvax), - Encoding: formatting.Hex, - } - - buildGenesisResponse := api.BuildGenesisReply{} - platformvmSS := api.StaticService{} - if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - return nil, fmt.Errorf("problem while building platform chain's genesis state: %w", err) - } - - genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) - if err != nil { - return nil, err - } - - return genesisBytes, nil -} diff --git a/vms/platformvm/state/stakers_model_generator_check_test.go b/vms/platformvm/state/stakers_model_generator_check_test.go deleted file mode 100644 index 8c841c779e5b..000000000000 --- a/vms/platformvm/state/stakers_model_generator_check_test.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "errors" - "fmt" - "math" - "testing" - - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/prop" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var ( - errNotAStakerTx = errors.New("tx is not a stakerTx") - errWrongNodeID = errors.New("unexpected nodeID") -) - -// TestGeneratedStakersValidity tests the staker generator itself. -// It documents and verifies the invariants enforced by the staker generator. -func TestGeneratedStakersValidity(t *testing.T) { - properties := gopter.NewProperties(nil) - - ctx := buildStateCtx() - subnetID := ids.GenerateTestID() - nodeID := ids.GenerateTestNodeID() - maxDelegatorWeight := uint64(2023) - - properties.Property("AddValidatorTx generator checks", prop.ForAll( - func(nonInitTx *txs.Tx) string { - signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx, %w", err)) - } - - if err := signedTx.SyntacticVerify(ctx); err != nil { - return err.Error() - } - - addValTx, ok := signedTx.Unsigned.(*txs.AddValidatorTx) - if !ok { - return errNotAStakerTx.Error() - } - - if nodeID != addValTx.NodeID() { - return errWrongNodeID.Error() - } - - currentVal, err := NewCurrentStaker(signedTx.ID(), addValTx, uint64(100)) - if err != nil { - return err.Error() - } - - if currentVal.EndTime.Before(currentVal.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - currentVal.StartTime, currentVal.EndTime, currentVal) - } - - pendingVal, err := NewPendingStaker(signedTx.ID(), addValTx) - if err != nil { - return err.Error() - } - - if pendingVal.EndTime.Before(pendingVal.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - pendingVal.StartTime, pendingVal.EndTime, pendingVal) - } - - return "" - }, - addValidatorTxGenerator(ctx, &nodeID, math.MaxUint64), - )) - - properties.Property("AddDelegatorTx generator checks", prop.ForAll( - func(nonInitTx *txs.Tx) string { - signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx, %w", err)) - } - - if err := signedTx.SyntacticVerify(ctx); err != nil { - return err.Error() - } - - addDelTx, ok := signedTx.Unsigned.(*txs.AddDelegatorTx) - if !ok { - return errNotAStakerTx.Error() - } - - if nodeID != addDelTx.NodeID() { - return errWrongNodeID.Error() - } - - currentDel, err := NewCurrentStaker(signedTx.ID(), addDelTx, uint64(100)) - if err != nil { - return 
err.Error() - } - - if currentDel.EndTime.Before(currentDel.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - currentDel.StartTime, currentDel.EndTime, currentDel) - } - - if currentDel.Weight > maxDelegatorWeight { - return fmt.Sprintf("delegator weight %v above maximum %v, staker %v", - currentDel.Weight, maxDelegatorWeight, currentDel) - } - - pendingDel, err := NewPendingStaker(signedTx.ID(), addDelTx) - if err != nil { - return err.Error() - } - - if pendingDel.EndTime.Before(pendingDel.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - pendingDel.StartTime, pendingDel.EndTime, pendingDel) - } - - if pendingDel.Weight > maxDelegatorWeight { - return fmt.Sprintf("delegator weight %v above maximum %v, staker %v", - pendingDel.Weight, maxDelegatorWeight, pendingDel) - } - - return "" - }, - addDelegatorTxGenerator(ctx, &nodeID, maxDelegatorWeight), - )) - - properties.Property("addPermissionlessValidatorTx generator checks", prop.ForAll( - func(nonInitTx *txs.Tx) string { - signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx, %w", err)) - } - - if err := signedTx.SyntacticVerify(ctx); err != nil { - return err.Error() - } - - addValTx, ok := signedTx.Unsigned.(*txs.AddPermissionlessValidatorTx) - if !ok { - return errNotAStakerTx.Error() - } - - if nodeID != addValTx.NodeID() { - return errWrongNodeID.Error() - } - - if subnetID != addValTx.SubnetID() { - return "subnet not duly set" - } - - currentVal, err := NewCurrentStaker(signedTx.ID(), addValTx, uint64(100)) - if err != nil { - return err.Error() - } - - if currentVal.EndTime.Before(currentVal.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - currentVal.StartTime, currentVal.EndTime, currentVal) - } - - pendingVal, err := NewPendingStaker(signedTx.ID(), addValTx) - if err != nil { - return err.Error() - } - - if pendingVal.EndTime.Before(pendingVal.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - pendingVal.StartTime, pendingVal.EndTime, pendingVal) - } - - return "" - }, - addPermissionlessValidatorTxGenerator(ctx, &subnetID, &nodeID, math.MaxUint64), - )) - - properties.Property("addPermissionlessDelegatorTx generator checks", prop.ForAll( - func(nonInitTx *txs.Tx) string { - signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx, %w", err)) - } - - if err := signedTx.SyntacticVerify(ctx); err != nil { - return err.Error() - } - - addDelTx, ok := signedTx.Unsigned.(*txs.AddPermissionlessDelegatorTx) - if !ok { - return errNotAStakerTx.Error() - } - - if nodeID != addDelTx.NodeID() { - return errWrongNodeID.Error() - } - - if subnetID != addDelTx.SubnetID() { - return "subnet not duly set" - } - - currentDel, err := NewCurrentStaker(signedTx.ID(), addDelTx, uint64(100)) - if err != nil { - return err.Error() - } - - if currentDel.EndTime.Before(currentDel.StartTime) { - return fmt.Sprintf("startTime %v not before endTime %v, staker %v", - currentDel.StartTime, currentDel.EndTime, currentDel) - } - - if currentDel.Weight > maxDelegatorWeight { - return fmt.Sprintf("delegator weight %v above maximum %v, staker %v", - currentDel.Weight, maxDelegatorWeight, currentDel) - } - - pendingDel, err := NewPendingStaker(signedTx.ID(), addDelTx) - if err != nil { - return err.Error() - } - - if pendingDel.EndTime.Before(pendingDel.StartTime) { - return 
fmt.Sprintf("startTime %v not before endTime %v, staker %v", - pendingDel.StartTime, pendingDel.EndTime, pendingDel) - } - - if pendingDel.Weight > maxDelegatorWeight { - return fmt.Sprintf("delegator weight %v above maximum %v, staker %v", - pendingDel.Weight, maxDelegatorWeight, pendingDel) - } - - return "" - }, - addPermissionlessDelegatorTxGenerator(ctx, &subnetID, &nodeID, maxDelegatorWeight), - )) - - properties.TestingRun(t) -} diff --git a/vms/platformvm/state/stakers_model_generator_test.go b/vms/platformvm/state/stakers_model_generator_test.go deleted file mode 100644 index add9810069b2..000000000000 --- a/vms/platformvm/state/stakers_model_generator_test.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "fmt" - "reflect" - "time" - - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - blst "github.com/supranational/blst/bindings/go" -) - -type generatorPriorityType uint8 - -const ( - permissionlessValidator generatorPriorityType = iota - permissionedValidator - permissionlessDelegator - permissionedDelegator -) - -// stakerTxGenerator helps creating random yet reproducible txs.StakerTx, -// which can be used in our property tests. stakerTxGenerator returns txs.StakerTx -// as the Unsigned attribute of a txs.Tx just to work around the inability of -// generators to return interface. The holding txs.Tx signing is deferred to tests -// to allow them modifying stakers parameters without breaking txID. -// A full txs.StakerTx is returned, instead of a Staker object, in order to extend -// property testing to stakers reload (which starts from the transaction). The tx is filled -// just enough to rebuild staker state (inputs/outputs utxos are neglected). -// TestGeneratedStakersValidity documents and verifies the enforced invariants. -func stakerTxGenerator( - ctx *snow.Context, - priority generatorPriorityType, - subnetID *ids.ID, - nodeID *ids.NodeID, - maxWeight uint64, // helps avoiding overflows in delegator tests -) gopter.Gen { - switch priority { - case permissionedValidator: - return addValidatorTxGenerator(ctx, nodeID, maxWeight) - case permissionedDelegator: - return addDelegatorTxGenerator(ctx, nodeID, maxWeight) - case permissionlessValidator: - return addPermissionlessValidatorTxGenerator(ctx, subnetID, nodeID, maxWeight) - case permissionlessDelegator: - return addPermissionlessDelegatorTxGenerator(ctx, subnetID, nodeID, maxWeight) - default: - panic(fmt.Sprintf("unhandled tx priority %v", priority)) - } -} - -func addPermissionlessValidatorTxGenerator( - ctx *snow.Context, - subnetID *ids.ID, - nodeID *ids.NodeID, - maxWeight uint64, -) gopter.Gen { - return stakerDataGenerator(nodeID, maxWeight).FlatMap( - func(v interface{}) gopter.Gen { - genStakerSubnetID := subnetIDGen - if subnetID != nil { - genStakerSubnetID = gen.Const(*subnetID) - } - - // always return a non-empty bls key here. Will drop it - // below, in txs.Tx generator if needed. 
- fullBlsKeyGen := gen.SliceOfN(32, gen.UInt8()).FlatMap( - func(v interface{}) gopter.Gen { - bytes := v.([]byte) - sk1 := blst.KeyGen(bytes) - return gen.Const(signer.NewProofOfPossession(sk1)) - }, - reflect.TypeOf(&signer.ProofOfPossession{}), - ) - - stakerData := v.(txs.Validator) - - specificGen := gen.StructPtr(reflect.TypeOf(&txs.AddPermissionlessValidatorTx{}), map[string]gopter.Gen{ - "BaseTx": gen.Const(txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: ctx.NetworkID, - BlockchainID: ctx.ChainID, - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }), - "Validator": gen.Const(stakerData), - "Subnet": genStakerSubnetID, - "Signer": fullBlsKeyGen, - "StakeOuts": gen.Const([]*avax.TransferableOutput{ - { - Asset: avax.Asset{ - ID: ctx.AVAXAssetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: stakerData.Weight(), - }, - }, - }), - "ValidatorRewardsOwner": gen.Const( - &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - ), - "DelegatorRewardsOwner": gen.Const( - &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - ), - "DelegationShares": gen.UInt32Range(0, reward.PercentDenominator), - }) - - return specificGen.FlatMap( - func(v interface{}) gopter.Gen { - stakerTx := v.(*txs.AddPermissionlessValidatorTx) - - // drop Signer if needed - if stakerTx.Subnet != constants.PlatformChainID { - stakerTx.Signer = &signer.Empty{} - } - - if err := stakerTx.SyntacticVerify(ctx); err != nil { - panic(fmt.Errorf("failed syntax verification in tx generator, %w", err)) - } - - // Note: we don't sign the tx here, since we want the freedom to modify - // the stakerTx just before testing while avoid having the wrong txID. - // We use txs.Tx as a box to return a txs.StakerTx interface. - sTx := &txs.Tx{Unsigned: stakerTx} - - return gen.Const(sTx) - }, - reflect.TypeOf(&txs.AddPermissionlessValidatorTx{}), - ) - }, - reflect.TypeOf(&txs.AddPermissionlessValidatorTx{}), - ) -} - -func addValidatorTxGenerator( - ctx *snow.Context, - nodeID *ids.NodeID, - maxWeight uint64, -) gopter.Gen { - return stakerDataGenerator(nodeID, maxWeight).FlatMap( - func(v interface{}) gopter.Gen { - stakerData := v.(txs.Validator) - - specificGen := gen.StructPtr(reflect.TypeOf(&txs.AddValidatorTx{}), map[string]gopter.Gen{ - "BaseTx": gen.Const(txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: ctx.NetworkID, - BlockchainID: ctx.ChainID, - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }), - "Validator": gen.Const(stakerData), - "StakeOuts": gen.Const([]*avax.TransferableOutput{ - { - Asset: avax.Asset{ - ID: ctx.AVAXAssetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: stakerData.Weight(), - }, - }, - }), - "RewardsOwner": gen.Const( - &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - ), - "DelegationShares": gen.UInt32Range(0, reward.PercentDenominator), - }) - - return specificGen.FlatMap( - func(v interface{}) gopter.Gen { - stakerTx := v.(*txs.AddValidatorTx) - - if err := stakerTx.SyntacticVerify(ctx); err != nil { - panic(fmt.Errorf("failed syntax verification in tx generator, %w", err)) - } - - // Note: we don't sign the tx here, since we want the freedom to modify - // the stakerTx just before testing while avoid having the wrong txID. - // We use txs.Tx as a box to return a txs.StakerTx interface. 
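// (how the box is opened at the use sites elsewhere in this patch:
// sTx.Unsigned.(txs.StakerTx) recovers the interface view, and
// txs.NewSigned(sTx.Unsigned, txs.Codec, nil) is invoked only once the
// staker's fields are final, so the resulting txID matches the staker
// actually being tested.)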
- sTx := &txs.Tx{Unsigned: stakerTx} - - return gen.Const(sTx) - }, - reflect.TypeOf(&txs.AddValidatorTx{}), - ) - }, - reflect.TypeOf(txs.Validator{}), - ) -} - -func addPermissionlessDelegatorTxGenerator( - ctx *snow.Context, - subnetID *ids.ID, - nodeID *ids.NodeID, - maxWeight uint64, // helps avoiding overflows in delegator tests -) gopter.Gen { - return stakerDataGenerator(nodeID, maxWeight).FlatMap( - func(v interface{}) gopter.Gen { - genStakerSubnetID := subnetIDGen - if subnetID != nil { - genStakerSubnetID = gen.Const(*subnetID) - } - - stakerData := v.(txs.Validator) - delGen := gen.StructPtr(reflect.TypeOf(txs.AddPermissionlessDelegatorTx{}), map[string]gopter.Gen{ - "BaseTx": gen.Const(txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: ctx.NetworkID, - BlockchainID: ctx.ChainID, - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }), - "Validator": gen.Const(stakerData), - "Subnet": genStakerSubnetID, - "StakeOuts": gen.Const([]*avax.TransferableOutput{ - { - Asset: avax.Asset{ - ID: ctx.AVAXAssetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: stakerData.Weight(), - }, - }, - }), - "DelegationRewardsOwner": gen.Const( - &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - ), - }) - - return delGen.FlatMap( - func(v interface{}) gopter.Gen { - stakerTx := v.(*txs.AddPermissionlessDelegatorTx) - - if err := stakerTx.SyntacticVerify(ctx); err != nil { - panic(fmt.Errorf("failed syntax verification in tx generator, %w", err)) - } - - // Note: we don't sign the tx here, since we want the freedom to modify - // the stakerTx just before testing while avoid having the wrong txID. - // We use txs.Tx as a box to return a txs.StakerTx interface. - sTx := &txs.Tx{Unsigned: stakerTx} - - return gen.Const(sTx) - }, - reflect.TypeOf(&txs.AddPermissionlessDelegatorTx{}), - ) - }, - reflect.TypeOf(txs.Validator{}), - ) -} - -func addDelegatorTxGenerator( - ctx *snow.Context, - nodeID *ids.NodeID, - maxWeight uint64, // helps avoiding overflows in delegator tests -) gopter.Gen { - return stakerDataGenerator(nodeID, maxWeight).FlatMap( - func(v interface{}) gopter.Gen { - stakerData := v.(txs.Validator) - delGen := gen.StructPtr(reflect.TypeOf(txs.AddDelegatorTx{}), map[string]gopter.Gen{ - "BaseTx": gen.Const(txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: ctx.NetworkID, - BlockchainID: ctx.ChainID, - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }), - "Validator": gen.Const(stakerData), - "StakeOuts": gen.Const([]*avax.TransferableOutput{ - { - Asset: avax.Asset{ - ID: ctx.AVAXAssetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: stakerData.Weight(), - }, - }, - }), - "DelegationRewardsOwner": gen.Const( - &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - ), - }) - - return delGen.FlatMap( - func(v interface{}) gopter.Gen { - stakerTx := v.(*txs.AddDelegatorTx) - - if err := stakerTx.SyntacticVerify(ctx); err != nil { - panic(fmt.Errorf("failed syntax verification in tx generator, %w", err)) - } - - // Note: we don't sign the tx here, since we want the freedom to modify - // the stakerTx just before testing while avoid having the wrong txID. - // We use txs.Tx as a box to return a txs.StakerTx interface. 
- sTx := &txs.Tx{Unsigned: stakerTx} - - return gen.Const(sTx) - }, - reflect.TypeOf(&txs.AddDelegatorTx{}), - ) - }, - reflect.TypeOf(txs.Validator{}), - ) -} - -func stakerDataGenerator( - nodeID *ids.NodeID, - maxWeight uint64, // helps avoiding overflows in delegator tests -) gopter.Gen { - return genStakerTimeData().FlatMap( - func(v interface{}) gopter.Gen { - stakerData := v.(stakerTimeData) - - genStakerNodeID := genNodeID - if nodeID != nil { - genStakerNodeID = gen.Const(*nodeID) - } - - return gen.Struct(reflect.TypeOf(txs.Validator{}), map[string]gopter.Gen{ - "NodeID": genStakerNodeID, - "Start": gen.Const(uint64(stakerData.StartTime.Unix())), - "End": gen.Const(uint64(stakerData.StartTime.Add(time.Duration(stakerData.Duration)).Unix())), - "Wght": gen.UInt64Range(1, maxWeight), - }) - }, - reflect.TypeOf(stakerTimeData{}), - ) -} - -// stakerTimeData holds seed attributes to generate a random-yet-reproducible txs.Validator -type stakerTimeData struct { - StartTime time.Time - Duration int64 -} - -// genStakerTimeData is the helper to generate stakerMicroData -func genStakerTimeData() gopter.Gen { - return gen.Struct(reflect.TypeOf(&stakerTimeData{}), map[string]gopter.Gen{ - "StartTime": gen.Time(), - "Duration": gen.Int64Range(int64(time.Hour), int64(365*24*time.Hour)), - }) -} - -const ( - lengthID = 32 - lengthNodeID = 20 -) - -// subnetIDGen is the helper generator for subnetID, duly skewed towards primary network -var subnetIDGen = gen.Weighted([]gen.WeightedGen{ - { - Weight: 50, - Gen: gen.Const(constants.PrimaryNetworkID), - }, - { - Weight: 50, - Gen: gen.SliceOfN(lengthID, gen.UInt8()).FlatMap( - func(v interface{}) gopter.Gen { - byteSlice := v.([]byte) - var byteArray [lengthID]byte - copy(byteArray[:], byteSlice) - return gen.Const(ids.ID(byteArray)) - }, - reflect.TypeOf([]byte{}), - ), - }, -}) - -// genNodeID is the helper generator for ids.NodeID objects -var genNodeID = gen.SliceOfN(lengthNodeID, gen.UInt8()).FlatMap( - func(v interface{}) gopter.Gen { - byteSlice := v.([]byte) - var byteArray [lengthNodeID]byte - copy(byteArray[:], byteSlice) - return gen.Const(ids.NodeID(byteArray)) - }, - reflect.TypeOf([]byte{}), -) diff --git a/vms/platformvm/state/stakers_model_storage.go b/vms/platformvm/state/stakers_model_storage.go deleted file mode 100644 index 0e4d456581d4..000000000000 --- a/vms/platformvm/state/stakers_model_storage.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "errors" - - "golang.org/x/exp/maps" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" -) - -var ( - _ Stakers = (*stakersStorageModel)(nil) - _ StakerIterator = (*stakersStorageIteratorModel)(nil) -) - -// stakersStorageModel is the executable reference model of how we expect -// P-chain state and diffs to behave with respect to stakers. -// stakersStorageModel abstracts away the complexity related to -// P-chain state persistence and to the Diff flushing mechanisms. -// stakersStorageModel represents how we expect Diff and State to behave -// in a single threaded environment when stakers are written to or read from them. -// The utility of stakersStorageModel as an executable reference model is that -// we can write automatic tests asserting that Diff and State conform -// to stakersStorageModel. 
-
-type subnetNodeKey struct {
-	subnetID ids.ID
-	nodeID   ids.NodeID
-}
-
-type stakersStorageModel struct {
-	currentValidators map[subnetNodeKey]*Staker
-	currentDelegators map[subnetNodeKey](map[ids.ID]*Staker) // -> (txID -> Staker)
-
-	pendingValidators map[subnetNodeKey]*Staker
-	pendingDelegators map[subnetNodeKey](map[ids.ID]*Staker) // -> (txID -> Staker)
-}
-
-func newStakersStorageModel() *stakersStorageModel {
-	return &stakersStorageModel{
-		currentValidators: make(map[subnetNodeKey]*Staker),
-		currentDelegators: make(map[subnetNodeKey]map[ids.ID]*Staker),
-		pendingValidators: make(map[subnetNodeKey]*Staker),
-		pendingDelegators: make(map[subnetNodeKey]map[ids.ID]*Staker),
-	}
-}
-
-func (m *stakersStorageModel) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) {
-	return getValidator(subnetID, nodeID, m.currentValidators)
-}
-
-func (m *stakersStorageModel) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) {
-	return getValidator(subnetID, nodeID, m.pendingValidators)
-}
-
-func getValidator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey]*Staker) (*Staker, error) {
-	key := subnetNodeKey{
-		subnetID: subnetID,
-		nodeID:   nodeID,
-	}
-	res, found := domain[key]
-	if !found {
-		return nil, database.ErrNotFound
-	}
-	return res, nil
-}
-
-func (m *stakersStorageModel) PutCurrentValidator(staker *Staker) {
-	putValidator(staker, m.currentValidators)
-}
-
-func (m *stakersStorageModel) PutPendingValidator(staker *Staker) {
-	putValidator(staker, m.pendingValidators)
-}
-
-func putValidator(staker *Staker, domain map[subnetNodeKey]*Staker) {
-	key := subnetNodeKey{
-		subnetID: staker.SubnetID,
-		nodeID:   staker.NodeID,
-	}
-
-	// overwrite the validator even if it already exists. In prod code,
-	// it's up to block verification to check that we do not overwrite
-	// a validator existing on state or lower diffs.
-	domain[key] = staker
-}
-
-func (m *stakersStorageModel) DeleteCurrentValidator(staker *Staker) {
-	deleteValidator(staker, m.currentValidators)
-}
-
-func (m *stakersStorageModel) DeletePendingValidator(staker *Staker) {
-	deleteValidator(staker, m.pendingValidators)
-}
-
-func deleteValidator(staker *Staker, domain map[subnetNodeKey]*Staker) {
-	key := subnetNodeKey{
-		subnetID: staker.SubnetID,
-		nodeID:   staker.NodeID,
-	}
-	delete(domain, key)
-}
-
-func (m *stakersStorageModel) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) {
-	return getDelegatorIterator(subnetID, nodeID, m.currentDelegators), nil
-}
-
-func (m *stakersStorageModel) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) {
-	return getDelegatorIterator(subnetID, nodeID, m.pendingDelegators), nil
-}
-
-func getDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID, domain map[subnetNodeKey](map[ids.ID]*Staker)) StakerIterator {
-	key := subnetNodeKey{
-		subnetID: subnetID,
-		nodeID:   nodeID,
-	}
-	dels, found := domain[key]
-	if !found {
-		return EmptyIterator
-	}
-
-	sortedDels := maps.Values(dels)
-	utils.Sort(sortedDels)
-	return &stakersStorageIteratorModel{
-		current:       nil,
-		sortedStakers: sortedDels,
-	}
-}
-
-func (m *stakersStorageModel) PutCurrentDelegator(staker *Staker) {
-	putDelegator(staker, m.currentDelegators)
-}
-
-func (m *stakersStorageModel) PutPendingDelegator(staker *Staker) {
-	putDelegator(staker, m.pendingDelegators)
-}
-
-func putDelegator(staker *Staker, domain map[subnetNodeKey]map[ids.ID]*Staker) {
-	key := subnetNodeKey{
-		subnetID: staker.SubnetID,
-		nodeID:   staker.NodeID,
-	}
-
-	dels, found := domain[key]
-	if !found {
-		dels = make(map[ids.ID]*Staker)
-		domain[key] = dels
-	}
-	dels[staker.TxID] = staker
-}
-
-func (m *stakersStorageModel) DeleteCurrentDelegator(staker *Staker) {
-	deleteDelegator(staker, m.currentDelegators)
-}
-
-func (m *stakersStorageModel) DeletePendingDelegator(staker *Staker) {
-	deleteDelegator(staker, m.pendingDelegators)
-}
-
-func deleteDelegator(staker *Staker, domain map[subnetNodeKey]map[ids.ID]*Staker) {
-	key := subnetNodeKey{
-		subnetID: staker.SubnetID,
-		nodeID:   staker.NodeID,
-	}
-
-	dels, found := domain[key]
-	if !found {
-		return
-	}
-	delete(dels, staker.TxID)
-
-	// prune
-	if len(dels) == 0 {
-		delete(domain, key)
-	}
-}
-
-func (m *stakersStorageModel) GetCurrentStakerIterator() (StakerIterator, error) {
-	return getCurrentStakerIterator(m.currentValidators, m.currentDelegators), nil
-}
-
-func (m *stakersStorageModel) GetPendingStakerIterator() (StakerIterator, error) {
-	return getCurrentStakerIterator(m.pendingValidators, m.pendingDelegators), nil
-}
-
-func getCurrentStakerIterator(
-	validators map[subnetNodeKey]*Staker,
-	delegators map[subnetNodeKey](map[ids.ID]*Staker),
-) StakerIterator {
-	allStakers := maps.Values(validators)
-	for _, dels := range delegators {
-		allStakers = append(allStakers, maps.Values(dels)...)
-	}
-	utils.Sort(allStakers)
-	return &stakersStorageIteratorModel{
-		current:       nil,
-		sortedStakers: allStakers,
-	}
-}
-
-func (*stakersStorageModel) SetDelegateeReward(
-	ids.ID,
-	ids.NodeID,
-	uint64,
-) error {
-	return errors.New("method not implemented in model")
-}
-
-func (*stakersStorageModel) GetDelegateeReward(
-	ids.ID,
-	ids.NodeID,
-) (uint64, error) {
-	return 0, errors.New("method not implemented in model")
-}
-
-type stakersStorageIteratorModel struct {
-	current *Staker
-
-	// sortedStakers contains the sorted list of stakers
-	// as it should be returned by iteration.
-	// sortedStakers must be sorted upon stakersStorageIteratorModel creation.
-	// Stakers are evicted from sortedStakers as Next() is called.
-	sortedStakers []*Staker
-}
-
-func (i *stakersStorageIteratorModel) Next() bool {
-	if len(i.sortedStakers) == 0 {
-		return false
-	}
-
-	i.current = i.sortedStakers[0]
-	i.sortedStakers = i.sortedStakers[1:]
-	return true
-}
-
-func (i *stakersStorageIteratorModel) Value() *Staker {
-	return i.current
-}
-
-func (i *stakersStorageIteratorModel) Release() {
-	i.current = nil
-	i.sortedStakers = nil
-}
diff --git a/vms/platformvm/state/stakers_model_storage_test.go b/vms/platformvm/state/stakers_model_storage_test.go
deleted file mode 100644
index 4b6c08f3c3a7..000000000000
--- a/vms/platformvm/state/stakers_model_storage_test.go
+++ /dev/null
@@ -1,994 +0,0 @@
-// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
-// See the file LICENSE for licensing terms.
-
-package state
-
-import (
-	"fmt"
-	"reflect"
-	"sync/atomic"
-	"testing"
-
-	"github.com/leanovate/gopter"
-	"github.com/leanovate/gopter/commands"
-	"github.com/leanovate/gopter/gen"
-
-	"github.com/ava-labs/avalanchego/database"
-	"github.com/ava-labs/avalanchego/database/memdb"
-	"github.com/ava-labs/avalanchego/database/versiondb"
-	"github.com/ava-labs/avalanchego/ids"
-	"github.com/ava-labs/avalanchego/utils/constants"
-	"github.com/ava-labs/avalanchego/vms/platformvm/status"
-	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
-)
-
-var (
-	_ Versions         = (*sysUnderTest)(nil)
-	_ commands.Command = (*putCurrentValidatorCommand)(nil)
-	_ commands.Command = (*deleteCurrentValidatorCommand)(nil)
-	_ commands.Command = (*putCurrentDelegatorCommand)(nil)
-	_ commands.Command = (*deleteCurrentDelegatorCommand)(nil)
-	_ commands.Command = (*addTopDiffCommand)(nil)
-	_ commands.Command = (*applyAndCommitBottomDiffCommand)(nil)
-	_ commands.Command = (*rebuildStateCommand)(nil)
-
-	commandsCtx = buildStateCtx()
-)
-
-// TestStateAndDiffComparisonToStorageModel verifies that a production-like
-// system made of a stack of Diffs built on top of a State conforms to
-// our stakersStorageModel. It achieves this by:
-// 1. randomly generating a sequence of stakers writes as well as
-// some persistence operations (commit/diff apply),
-// 2. applying the sequence to both our stakersStorageModel and the production-like system.
-// 3. checking that both stakersStorageModel and the production-like system have
-// the same state after each operation.
-//
-// The following invariants are required for stakers state to properly work:
-// 1. No staker add/update/delete ops are performed directly on baseState; they always go through a diff
-// 2. Any number of stakers ops can be carried out on a single diff
-// 3. Diffs work in FIFO fashion: they are added on top of current state and only
-// the bottom diff is applied to base state.
-// 4. The bottom diff applied to base state is immediately committed.
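The diff-stack discipline behind invariants 1, 3 and 4, sketched with the names used by the commands below (sysUnderTest internals are not part of this hunk, so the exact shape of the helpers is assumed):

	top := sys.getTopChainState()   // writes always target the newest diff,
	top.PutCurrentValidator(staker) // never sys.baseState directly
	// addTopDiffCommand pushes a fresh diff above the current top;
	// applyAndCommitBottomDiffCommand pops the oldest diff, applies it to
	// sys.baseState and calls Commit() immediately afterwards.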
-func TestStateAndDiffComparisonToStorageModel(t *testing.T) {
-	properties := gopter.NewProperties(nil)
-
-	// // to reproduce a given scenario do something like this:
-	// parameters := gopter.DefaultTestParametersWithSeed(1688641048828490074)
-	// properties := gopter.NewProperties(parameters)
-
-	properties.Property("state comparison to storage model", commands.Prop(stakersCommands))
-	properties.TestingRun(t)
-}
-
-// stakersCommands creates/destroys the system under test and generates
-// commands and initial states (stakersStorageModel)
-var stakersCommands = &commands.ProtoCommands{
-	NewSystemUnderTestFunc: func(initialState commands.State) commands.SystemUnderTest {
-		model := initialState.(*stakersStorageModel)
-
-		baseDB := versiondb.New(memdb.New())
-		baseState, err := buildChainState(baseDB, nil)
-		if err != nil {
-			panic(err)
-		}
-
-		// fill up baseState with the model's initial content
-		for _, staker := range model.currentValidators {
-			baseState.PutCurrentValidator(staker)
-		}
-		for _, delegators := range model.currentDelegators {
-			for _, staker := range delegators {
-				baseState.PutCurrentDelegator(staker)
-			}
-		}
-		for _, staker := range model.pendingValidators {
-			baseState.PutPendingValidator(staker)
-		}
-		for _, delegators := range model.pendingDelegators {
-			for _, staker := range delegators {
-				baseState.PutPendingDelegator(staker)
-			}
-		}
-		if err := baseState.Commit(); err != nil {
-			panic(err)
-		}
-
-		return newSysUnderTest(baseDB, baseState)
-	},
-	DestroySystemUnderTestFunc: func(sut commands.SystemUnderTest) {
-		// retrieve base state and close it
-		sys := sut.(*sysUnderTest)
-		err := sys.baseState.Close()
-		if err != nil {
-			panic(err)
-		}
-	},
-	// a trick to force command regeneration at each sampling.
-	// gen.Const would not allow it
-	InitialStateGen: gen.IntRange(1, 2).Map(
-		func(int) *stakersStorageModel {
-			return newStakersStorageModel()
-		},
-	),
-
-	InitialPreConditionFunc: func(state commands.State) bool {
-		return true // nothing to do for now
-	},
-	GenCommandFunc: func(state commands.State) gopter.Gen {
-		return gen.OneGenOf(
-			genPutCurrentValidatorCommand,
-			genDeleteCurrentValidatorCommand,
-
-			genPutCurrentDelegatorCommand,
-			genDeleteCurrentDelegatorCommand,
-
-			genAddTopDiffCommand,
-			genApplyAndCommitBottomDiffCommand,
-			genRebuildStateCommand,
-		)
-	},
-}
-
-// PutCurrentValidator section
-type putCurrentValidatorCommand struct {
-	sTx *txs.Tx
-	err error
-}
-
-func (cmd *putCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result {
-	sTx := cmd.sTx
-	sys := sut.(*sysUnderTest)
-
-	if err := sys.checkThereIsADiff(); err != nil {
-		return sys // state checks later on should spot missing validator
-	}
-
-	stakerTx := sTx.Unsigned.(txs.StakerTx)
-	currentVal, err := NewCurrentStaker(sTx.ID(), stakerTx, uint64(1000))
-	if err != nil {
-		return sys // state checks later on should spot missing validator
-	}
-
-	topChainState := sys.getTopChainState()
-	topChainState.PutCurrentValidator(currentVal)
-	topChainState.AddTx(sTx, status.Committed)
-	return sys
-}
-
-func (cmd *putCurrentValidatorCommand) NextState(cmdState commands.State) commands.State {
-	sTx := cmd.sTx
-	stakerTx := sTx.Unsigned.(txs.StakerTx)
-	currentVal, err := NewCurrentStaker(sTx.ID(), stakerTx, uint64(1000))
-	if err != nil {
-		return cmdState // state checks later on should spot missing validator
-	}
-
-	cmdState.(*stakersStorageModel).PutCurrentValidator(currentVal)
-	return cmdState
-}
-
-func (*putCurrentValidatorCommand) PreCondition(commands.State) bool {
- // We allow inserting the same validator twice - return true -} - -func (cmd *putCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if cmd.err != nil { - cmd.err = nil // reset for next runs - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkSystemAndModelContent(cmdState, res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkValidatorSetContent(res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - return &gopter.PropResult{Status: gopter.PropTrue} -} - -func (cmd *putCurrentValidatorCommand) String() string { - stakerTx := cmd.sTx.Unsigned.(txs.StakerTx) - return fmt.Sprintf("\nputCurrentValidator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", - stakerTx.SubnetID(), - stakerTx.NodeID(), - cmd.sTx.TxID, - stakerTx.CurrentPriority(), - stakerTx.StartTime().Unix(), - stakerTx.EndTime().Sub(stakerTx.StartTime()), - ) -} - -var genPutCurrentValidatorCommand = addPermissionlessValidatorTxGenerator(commandsCtx, nil, nil, 1000).Map( - func(nonInitTx *txs.Tx) commands.Command { - sTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx, %w", err)) - } - - cmd := &putCurrentValidatorCommand{ - sTx: sTx, - err: nil, - } - return cmd - }, -) - -// DeleteCurrentValidator section -type deleteCurrentValidatorCommand struct { - err error -} - -func (cmd *deleteCurrentValidatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - // delete first validator without delegators, if any - sys := sut.(*sysUnderTest) - - if err := sys.checkThereIsADiff(); err != nil { - return sys // state checks later on should spot missing validator - } - - topDiff := sys.getTopChainState() - - stakerIt, err := topDiff.GetCurrentStakerIterator() - if err != nil { - cmd.err = err - return sys - } - - var ( - found = false - validator *Staker - ) - for stakerIt.Next() { - validator = stakerIt.Value() - if !validator.Priority.IsCurrentValidator() { - continue // checks next validator - } - - // check validator has no delegators - delIt, err := topDiff.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) - if err != nil { - cmd.err = err - stakerIt.Release() - return sys - } - - hadDelegator := delIt.Next() - delIt.Release() - if !hadDelegator { - found = true - break // found - } else { - continue // checks next validator - } - } - - if !found { - stakerIt.Release() - return sys // no current validator to delete - } - stakerIt.Release() // release before modifying stakers collection - - topDiff.DeleteCurrentValidator(validator) - return sys // returns sys to allow comparison with state in PostCondition -} - -func (cmd *deleteCurrentValidatorCommand) NextState(cmdState commands.State) commands.State { - // delete first validator without delegators, if any - model := cmdState.(*stakersStorageModel) - stakerIt, err := model.GetCurrentStakerIterator() - if err != nil { - cmd.err = err - return cmdState - } - - var ( - found = false - validator *Staker - ) - for stakerIt.Next() { - validator = stakerIt.Value() - if !validator.Priority.IsCurrentValidator() { - continue // checks next validator - } - - // check validator has no delegators - delIt, err := model.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) - if err != nil { - cmd.err = err - stakerIt.Release() - return cmdState - } - - hadDelegator := delIt.Next() - delIt.Release() - if !hadDelegator { - found = true - break // found 
- } else { - continue // checks next validator - } - } - - if !found { - stakerIt.Release() - return cmdState // no current validator to add delegator to - } - stakerIt.Release() // release before modifying stakers collection - - model.DeleteCurrentValidator(validator) - return cmdState -} - -func (*deleteCurrentValidatorCommand) PreCondition(commands.State) bool { - // We allow deleting an un-existing validator - return true -} - -func (cmd *deleteCurrentValidatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if cmd.err != nil { - cmd.err = nil // reset for next runs - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkSystemAndModelContent(cmdState, res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkValidatorSetContent(res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - return &gopter.PropResult{Status: gopter.PropTrue} -} - -func (*deleteCurrentValidatorCommand) String() string { - return "\ndeleteCurrentValidator" -} - -// a trick to force command regeneration at each sampling. -// gen.Const would not allow it -var genDeleteCurrentValidatorCommand = gen.IntRange(1, 2).Map( - func(int) commands.Command { - return &deleteCurrentValidatorCommand{} - }, -) - -// PutCurrentDelegator section -type putCurrentDelegatorCommand struct { - sTx *txs.Tx - err error -} - -func (cmd *putCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - candidateDelegator := cmd.sTx - sys := sut.(*sysUnderTest) - - if err := sys.checkThereIsADiff(); err != nil { - return sys // state checks later on should spot missing validator - } - - err := addCurrentDelegatorInSystem(sys, candidateDelegator.Unsigned) - if err != nil { - cmd.err = err - } - return sys -} - -func addCurrentDelegatorInSystem(sys *sysUnderTest, candidateDelegatorTx txs.UnsignedTx) error { - // 1. check if there is a current validator, already inserted. If not return - // 2. Update candidateDelegatorTx attributes to make it delegator of selected validator - // 3. Add delegator to picked validator - chain := sys.getTopChainState() - - // 1. check if there is a current validator. If not, nothing to do - stakerIt, err := chain.GetCurrentStakerIterator() - if err != nil { - return err - } - - var ( - found = false - validator *Staker - ) - for !found && stakerIt.Next() { - validator = stakerIt.Value() - if validator.Priority.IsCurrentValidator() { - found = true - break - } - } - if !found { - stakerIt.Release() - return nil // no current validator to add delegator to - } - stakerIt.Release() // release before modifying stakers collection - - // 2. 
Add a delegator to it - addPermissionlessDelTx := candidateDelegatorTx.(*txs.AddPermissionlessDelegatorTx) - addPermissionlessDelTx.Subnet = validator.SubnetID - addPermissionlessDelTx.Validator.NodeID = validator.NodeID - - signedTx, err := txs.NewSigned(addPermissionlessDelTx, txs.Codec, nil) - if err != nil { - return fmt.Errorf("failed signing tx, %w", err) - } - - delegator, err := NewCurrentStaker(signedTx.ID(), signedTx.Unsigned.(txs.Staker), uint64(1000)) - if err != nil { - return fmt.Errorf("failed generating staker, %w", err) - } - - chain.PutCurrentDelegator(delegator) - chain.AddTx(signedTx, status.Committed) - return nil -} - -func (cmd *putCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { - candidateDelegator := cmd.sTx - model := cmdState.(*stakersStorageModel) - err := addCurrentDelegatorInModel(model, candidateDelegator.Unsigned) - if err != nil { - cmd.err = err - } - return cmdState -} - -func addCurrentDelegatorInModel(model *stakersStorageModel, candidateDelegatorTx txs.UnsignedTx) error { - // 1. check if there is a current validator, already inserted. If not return - // 2. Update candidateDelegator attributes to make it delegator of selected validator - // 3. Add delegator to picked validator - - // 1. check if there is a current validator. If not, nothing to do - stakerIt, err := model.GetCurrentStakerIterator() - if err != nil { - return err - } - - var ( - found = false - validator *Staker - ) - for !found && stakerIt.Next() { - validator = stakerIt.Value() - if validator.Priority.IsCurrentValidator() { - found = true - break - } - } - if !found { - stakerIt.Release() - return nil // no current validator to add delegator to - } - stakerIt.Release() // release before modifying stakers collection - - // 2. 
Add a delegator to it - addPermissionlessDelTx := candidateDelegatorTx.(*txs.AddPermissionlessDelegatorTx) - addPermissionlessDelTx.Subnet = validator.SubnetID - addPermissionlessDelTx.Validator.NodeID = validator.NodeID - - signedTx, err := txs.NewSigned(addPermissionlessDelTx, txs.Codec, nil) - if err != nil { - return fmt.Errorf("failed signing tx, %w", err) - } - - delegator, err := NewCurrentStaker(signedTx.ID(), signedTx.Unsigned.(txs.Staker), uint64(1000)) - if err != nil { - return fmt.Errorf("failed generating staker, %w", err) - } - - model.PutCurrentDelegator(delegator) - return nil -} - -func (*putCurrentDelegatorCommand) PreCondition(commands.State) bool { - return true -} - -func (cmd *putCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if cmd.err != nil { - cmd.err = nil // reset for next runs - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkSystemAndModelContent(cmdState, res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkValidatorSetContent(res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - return &gopter.PropResult{Status: gopter.PropTrue} -} - -func (cmd *putCurrentDelegatorCommand) String() string { - stakerTx := cmd.sTx.Unsigned.(txs.StakerTx) - return fmt.Sprintf("\nputCurrentDelegator(subnetID: %v, nodeID: %v, txID: %v, priority: %v, unixStartTime: %v, duration: %v)", - stakerTx.SubnetID(), - stakerTx.NodeID(), - cmd.sTx.TxID, - stakerTx.CurrentPriority(), - stakerTx.StartTime().Unix(), - stakerTx.EndTime().Sub(stakerTx.StartTime())) -} - -var genPutCurrentDelegatorCommand = addPermissionlessDelegatorTxGenerator(commandsCtx, nil, nil, 1000).Map( - func(nonInitTx *txs.Tx) commands.Command { - sTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx, %w", err)) - } - - cmd := &putCurrentDelegatorCommand{ - sTx: sTx, - } - return cmd - }, -) - -// DeleteCurrentDelegator section -type deleteCurrentDelegatorCommand struct { - err error -} - -func (cmd *deleteCurrentDelegatorCommand) Run(sut commands.SystemUnderTest) commands.Result { - // delete first delegator, if any - sys := sut.(*sysUnderTest) - - if err := sys.checkThereIsADiff(); err != nil { - return sys // state checks later on should spot missing validator - } - - _, err := deleteCurrentDelegator(sys) - if err != nil { - cmd.err = err - } - return sys // returns sys to allow comparison with state in PostCondition -} - -func deleteCurrentDelegator(sys *sysUnderTest) (bool, error) { - // delete first validator, if any - topDiff := sys.getTopChainState() - - stakerIt, err := topDiff.GetCurrentStakerIterator() - if err != nil { - return false, err - } - - var ( - found = false - delegator *Staker - ) - for !found && stakerIt.Next() { - delegator = stakerIt.Value() - if delegator.Priority.IsCurrentDelegator() { - found = true - break - } - } - if !found { - stakerIt.Release() - return false, nil // no current validator to delete - } - stakerIt.Release() // release before modifying stakers collection - - topDiff.DeleteCurrentDelegator(delegator) - return true, nil -} - -func (*deleteCurrentDelegatorCommand) NextState(cmdState commands.State) commands.State { - model := cmdState.(*stakersStorageModel) - stakerIt, err := model.GetCurrentStakerIterator() - if err != nil { - return err - } - - var ( - found = false - delegator *Staker - ) - for !found && stakerIt.Next() { - delegator = stakerIt.Value() - if 
delegator.Priority.IsCurrentDelegator() { - found = true - break - } - } - if !found { - stakerIt.Release() - return cmdState // no current validator to add delegator to - } - stakerIt.Release() // release before modifying stakers collection - - model.DeleteCurrentDelegator(delegator) - return cmdState -} - -func (*deleteCurrentDelegatorCommand) PreCondition(commands.State) bool { - return true -} - -func (cmd *deleteCurrentDelegatorCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if cmd.err != nil { - cmd.err = nil // reset for next runs - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkSystemAndModelContent(cmdState, res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkValidatorSetContent(res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - return &gopter.PropResult{Status: gopter.PropTrue} -} - -func (*deleteCurrentDelegatorCommand) String() string { - return "\ndeleteCurrentDelegator" -} - -// a trick to force command regeneration at each sampling. -// gen.Const would not allow it -var genDeleteCurrentDelegatorCommand = gen.IntRange(1, 2).Map( - func(int) commands.Command { - return &deleteCurrentDelegatorCommand{} - }, -) - -// addTopDiffCommand section -type addTopDiffCommand struct { - err error -} - -func (cmd *addTopDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { - sys := sut.(*sysUnderTest) - err := sys.addDiffOnTop() - if err != nil { - cmd.err = err - } - return sys -} - -func (*addTopDiffCommand) NextState(cmdState commands.State) commands.State { - return cmdState // model has no diffs -} - -func (*addTopDiffCommand) PreCondition(commands.State) bool { - return true -} - -func (cmd *addTopDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if cmd.err != nil { - cmd.err = nil // reset for next runs - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkSystemAndModelContent(cmdState, res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkValidatorSetContent(res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - return &gopter.PropResult{Status: gopter.PropTrue} -} - -func (*addTopDiffCommand) String() string { - return "\naddTopDiffCommand" -} - -// a trick to force command regeneration at each sampling. 
-// gen.Const would not allow it -var genAddTopDiffCommand = gen.IntRange(1, 2).Map( - func(int) commands.Command { - return &addTopDiffCommand{} - }, -) - -// applyAndCommitBottomDiffCommand section -type applyAndCommitBottomDiffCommand struct { - err error -} - -func (cmd *applyAndCommitBottomDiffCommand) Run(sut commands.SystemUnderTest) commands.Result { - sys := sut.(*sysUnderTest) - if _, err := sys.flushBottomDiff(); err != nil { - cmd.err = err - return sys - } - - if err := sys.baseState.Commit(); err != nil { - cmd.err = err - return sys - } - - return sys -} - -func (*applyAndCommitBottomDiffCommand) NextState(cmdState commands.State) commands.State { - return cmdState // model has no diffs -} - -func (*applyAndCommitBottomDiffCommand) PreCondition(commands.State) bool { - return true -} - -func (cmd *applyAndCommitBottomDiffCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if cmd.err != nil { - cmd.err = nil // reset for next runs - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkSystemAndModelContent(cmdState, res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkValidatorSetContent(res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - return &gopter.PropResult{Status: gopter.PropTrue} -} - -func (*applyAndCommitBottomDiffCommand) String() string { - return "\napplyAndCommitBottomDiffCommand" -} - -// a trick to force command regeneration at each sampling. -// gen.Const would not allow it -var genApplyAndCommitBottomDiffCommand = gen.IntRange(1, 2).Map( - func(int) commands.Command { - return &applyAndCommitBottomDiffCommand{} - }, -) - -// rebuildStateCommand section -type rebuildStateCommand struct { - err error -} - -func (cmd *rebuildStateCommand) Run(sut commands.SystemUnderTest) commands.Result { - sys := sut.(*sysUnderTest) - - // 1. Persist all outstanding changes - for { - diffFound, err := sys.flushBottomDiff() - if err != nil { - cmd.err = err - return sys - } - if !diffFound { - break - } - - if err := sys.baseState.Commit(); err != nil { - cmd.err = err - return sys - } - } - - if err := sys.baseState.Commit(); err != nil { - cmd.err = err - return sys - } - - // 2. Rebuild the state from the db - baseState, err := buildChainState(sys.baseDB, nil) - if err != nil { - cmd.err = err - return sys - } - sys.baseState = baseState - sys.diffsMap = map[ids.ID]Diff{} - sys.sortedDiffIDs = []ids.ID{} - - return sys -} - -func (*rebuildStateCommand) NextState(cmdState commands.State) commands.State { - return cmdState // model has no diffs -} - -func (*rebuildStateCommand) PreCondition(commands.State) bool { - return true -} - -func (cmd *rebuildStateCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult { - if cmd.err != nil { - cmd.err = nil // reset for next runs - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkSystemAndModelContent(cmdState, res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - if !checkValidatorSetContent(res) { - return &gopter.PropResult{Status: gopter.PropFalse} - } - - return &gopter.PropResult{Status: gopter.PropTrue} -} - -func (*rebuildStateCommand) String() string { - return "\nrebuildStateCommand" -} - -// a trick to force command regeneration at each sampling. 
-// gen.Const would not allow it -var genRebuildStateCommand = gen.IntRange(1, 2).Map( - func(int) commands.Command { - return &rebuildStateCommand{} - }, -) - -func checkSystemAndModelContent(cmdState commands.State, res commands.Result) bool { - model := cmdState.(*stakersStorageModel) - sys := res.(*sysUnderTest) - - // top view content must always match model content - topDiff := sys.getTopChainState() - - modelIt, err := model.GetCurrentStakerIterator() - if err != nil { - return false - } - sysIt, err := topDiff.GetCurrentStakerIterator() - if err != nil { - return false - } - - modelStakers := make([]*Staker, 0) - for modelIt.Next() { - modelStakers = append(modelStakers, modelIt.Value()) - } - modelIt.Release() - - sysStakers := make([]*Staker, 0) - for sysIt.Next() { - sysStakers = append(sysStakers, sysIt.Value()) - } - sysIt.Release() - - if len(modelStakers) != len(sysStakers) { - return false - } - - for idx, modelStaker := range modelStakers { - sysStaker := sysStakers[idx] - if modelStaker == nil || sysStaker == nil || !reflect.DeepEqual(modelStaker, sysStaker) { - return false - } - } - - return true -} - -// checkValidatorSetContent compares ValidatorsSet with P-chain base-state data and -// makes sure they are coherent. -func checkValidatorSetContent(res commands.Result) bool { - sys := res.(*sysUnderTest) - valSet := sys.baseState.(*merkleState).cfg.Validators - - sysIt, err := sys.baseState.GetCurrentStakerIterator() - if err != nil { - return false - } - - // valContent subnetID -> nodeID -> aggregate weight (validator's own weight + delegators' weight) - valContent := make(map[ids.ID]map[ids.NodeID]uint64) - for sysIt.Next() { - val := sysIt.Value() - if val.SubnetID != constants.PrimaryNetworkID { - continue - } - nodes, found := valContent[val.SubnetID] - if !found { - nodes = make(map[ids.NodeID]uint64) - valContent[val.SubnetID] = nodes - } - nodes[val.NodeID] += val.Weight - } - sysIt.Release() - - for subnetID, nodes := range valContent { - for nodeID, weight := range nodes { - if weight != valSet.GetWeight(subnetID, nodeID) { - return false - } - } - } - return true -} - -type sysUnderTest struct { - diffBlkIDSeed uint64 - baseDB database.Database - baseState State - sortedDiffIDs []ids.ID - diffsMap map[ids.ID]Diff -} - -func newSysUnderTest(baseDB database.Database, baseState State) *sysUnderTest { - sys := &sysUnderTest{ - baseDB: baseDB, - baseState: baseState, - diffsMap: map[ids.ID]Diff{}, - sortedDiffIDs: []ids.ID{}, - } - return sys -} - -func (s *sysUnderTest) GetState(blkID ids.ID) (Chain, bool) { - if state, found := s.diffsMap[blkID]; found { - return state, found - } - return s.baseState, blkID == s.baseState.GetLastAccepted() -} - -func (s *sysUnderTest) addDiffOnTop() error { - newTopBlkID := ids.Empty.Prefix(atomic.AddUint64(&s.diffBlkIDSeed, 1)) - var topBlkID ids.ID - if len(s.sortedDiffIDs) == 0 { - topBlkID = s.baseState.GetLastAccepted() - } else { - topBlkID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] - } - newTopDiff, err := NewDiff(topBlkID, s) - if err != nil { - return err - } - s.sortedDiffIDs = append(s.sortedDiffIDs, newTopBlkID) - s.diffsMap[newTopBlkID] = newTopDiff - return nil -} - -// getTopChainState returns top diff or baseState -func (s *sysUnderTest) getTopChainState() Chain { - var topChainStateID ids.ID - if len(s.sortedDiffIDs) != 0 { - topChainStateID = s.sortedDiffIDs[len(s.sortedDiffIDs)-1] - } else { - topChainStateID = s.baseState.GetLastAccepted() - } - - topChainState, _ := s.GetState(topChainStateID) - return 
topChainState -} - -// flushBottomDiff applies bottom diff if available -func (s *sysUnderTest) flushBottomDiff() (bool, error) { - if len(s.sortedDiffIDs) == 0 { - return false, nil - } - bottomDiffID := s.sortedDiffIDs[0] - diffToApply := s.diffsMap[bottomDiffID] - - err := diffToApply.Apply(s.baseState) - if err != nil { - return true, err - } - s.baseState.SetLastAccepted(bottomDiffID) - - s.sortedDiffIDs = s.sortedDiffIDs[1:] - delete(s.diffsMap, bottomDiffID) - return true, nil -} - -// checkThereIsADiff must be called before any stakers op. It makes -// sure that ops are carried out on at least a diff, as it happens -// in production code. -func (s *sysUnderTest) checkThereIsADiff() error { - if len(s.sortedDiffIDs) != 0 { - return nil // there is a diff - } - - return s.addDiffOnTop() -} diff --git a/vms/platformvm/state/stakers_properties_test.go b/vms/platformvm/state/stakers_properties_test.go deleted file mode 100644 index 6e7bc14e1100..000000000000 --- a/vms/platformvm/state/stakers_properties_test.go +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "errors" - "fmt" - "math" - "reflect" - "testing" - - "github.com/leanovate/gopter" - "github.com/leanovate/gopter/gen" - "github.com/leanovate/gopter/prop" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/versiondb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -const ( - pending stakerStatus = 0 - current stakerStatus = 1 -) - -// TestGeneralStakerContainersProperties checks that State and Diff conform our stakersStorageModel. -// TestGeneralStakerContainersProperties tests State and Diff in isolation, over simple operations. -// TestStateAndDiffComparisonToStorageModel carries a more involved verification over a production-like -// mix of State and Diffs. 
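Both of the tests named above drive gopter. The command implementations earlier in this section all follow gopter's stateful-testing contract: Run mutates the system under test, NextState applies the same step to the model, and PostCondition runs after NextState, so model and system must agree at that point. A minimal, self-contained sketch of that contract, with a plain counter standing in for the staker storage (every name below is illustrative and none of it comes from this patch), before the deleted test itself:

package example

import (
	"testing"

	"github.com/leanovate/gopter"
	"github.com/leanovate/gopter/commands"
	"github.com/leanovate/gopter/gen"
)

// counter is the system under test; the model is a plain int.
type counter struct{ n int }

type incCommand struct{}

func (incCommand) Run(sut commands.SystemUnderTest) commands.Result {
	c := sut.(*counter)
	c.n++
	return c.n
}

func (incCommand) NextState(cmdState commands.State) commands.State {
	return cmdState.(int) + 1 // mirror the system's step in the model
}

func (incCommand) PreCondition(commands.State) bool {
	return true
}

func (incCommand) PostCondition(cmdState commands.State, res commands.Result) *gopter.PropResult {
	// NextState has already been applied, so model and system must agree.
	if cmdState.(int) != res.(int) {
		return &gopter.PropResult{Status: gopter.PropFalse}
	}
	return &gopter.PropResult{Status: gopter.PropTrue}
}

func (incCommand) String() string {
	return "inc"
}

var counterCommands = &commands.ProtoCommands{
	NewSystemUnderTestFunc: func(commands.State) commands.SystemUnderTest {
		return &counter{}
	},
	InitialStateGen: gen.Const(0),
	GenCommandFunc: func(commands.State) gopter.Gen {
		// the same regeneration trick used by the gen*Command values above
		return gen.IntRange(1, 2).Map(func(int) commands.Command {
			return incCommand{}
		})
	},
}

func TestCounterMatchesModel(t *testing.T) {
	properties := gopter.NewProperties(nil)
	properties.Property("counter always matches its model", commands.Prop(counterCommands))
	properties.TestingRun(t)
}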
-func TestGeneralStakerContainersProperties(t *testing.T) { - storeCreators := map[string]func() (Stakers, error){ - "base state": func() (Stakers, error) { - baseDB := versiondb.New(memdb.New()) - return buildChainState(baseDB, nil) - }, - "diff": func() (Stakers, error) { - diff, _, err := buildDiffOnTopOfBaseState(nil) - return diff, err - }, - "storage model": func() (Stakers, error) { //nolint:golint,unparam - return newStakersStorageModel(), nil - }, - } - - for storeType, storeCreatorF := range storeCreators { - t.Run(storeType, func(t *testing.T) { - properties := generalStakerContainersProperties(storeCreatorF) - properties.TestingRun(t) - }) - } -} - -func generalStakerContainersProperties(storeCreatorF func() (Stakers, error)) *gopter.Properties { - properties := gopter.NewProperties(nil) - - ctx := buildStateCtx() - - properties.Property("add, delete and query current validators", prop.ForAll( - func(nonInitTx *txs.Tx) string { - store, err := storeCreatorF() - if err != nil { - return fmt.Sprintf("unexpected error while creating staker store, err %v", err) - } - - signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) - } - - stakerTx := signedTx.Unsigned.(txs.StakerTx) - staker, err := NewCurrentStaker(signedTx.ID(), stakerTx, uint64(100)) - if err != nil { - return err.Error() - } - - // no staker before insertion - _, err = store.GetCurrentValidator(staker.SubnetID, staker.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - err = checkStakersContent(store, []*Staker{}, current) - if err != nil { - return err.Error() - } - - // it's fine deleting unknown validator - store.DeleteCurrentValidator(staker) - _, err = store.GetCurrentValidator(staker.SubnetID, staker.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - err = checkStakersContent(store, []*Staker{}, current) - if err != nil { - return err.Error() - } - - // insert the staker and show it can be found - store.PutCurrentValidator(staker) - retrievedStaker, err := store.GetCurrentValidator(staker.SubnetID, staker.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(staker, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", staker, retrievedStaker) - } - err = checkStakersContent(store, []*Staker{staker}, current) - if err != nil { - return err.Error() - } - - // delete the staker and show it's not found anymore - store.DeleteCurrentValidator(staker) - _, err = store.GetCurrentValidator(staker.SubnetID, staker.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - err = checkStakersContent(store, []*Staker{}, current) - if err != nil { - return err.Error() - } - - return "" - }, - stakerTxGenerator(ctx, permissionedValidator, &constants.PrimaryNetworkID, nil, math.MaxUint64), - )) - - properties.Property("add, delete and query pending validators", prop.ForAll( - func(nonInitTx *txs.Tx) string { - store, err := storeCreatorF() - if err != nil { - return fmt.Sprintf("unexpected error while creating staker store, err %v", err) - } - - signedTx, err := txs.NewSigned(nonInitTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) - } - - staker, err := 
NewPendingStaker(signedTx.ID(), signedTx.Unsigned.(txs.StakerTx)) - if err != nil { - return err.Error() - } - - // no staker before insertion - _, err = store.GetPendingValidator(staker.SubnetID, staker.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - err = checkStakersContent(store, []*Staker{}, pending) - if err != nil { - return err.Error() - } - - // it's fine deleting unknown validator - store.DeletePendingValidator(staker) - _, err = store.GetPendingValidator(staker.SubnetID, staker.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - err = checkStakersContent(store, []*Staker{}, pending) - if err != nil { - return err.Error() - } - - // insert the staker and show it can be found - store.PutPendingValidator(staker) - retrievedStaker, err := store.GetPendingValidator(staker.SubnetID, staker.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(staker, retrievedStaker) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", staker, retrievedStaker) - } - err = checkStakersContent(store, []*Staker{staker}, pending) - if err != nil { - return err.Error() - } - - // delete the staker and show it's found anymore - store.DeletePendingValidator(staker) - _, err = store.GetPendingValidator(staker.SubnetID, staker.NodeID) - if err != database.ErrNotFound { - return fmt.Sprintf("unexpected error %v, got %v", database.ErrNotFound, err) - } - err = checkStakersContent(store, []*Staker{}, pending) - if err != nil { - return err.Error() - } - - return "" - }, - stakerTxGenerator(ctx, permissionedValidator, &constants.PrimaryNetworkID, nil, math.MaxUint64), - )) - - var ( - subnetID = ids.GenerateTestID() - nodeID = ids.GenerateTestNodeID() - ) - properties.Property("add, delete and query current delegators", prop.ForAll( - func(nonInitValTx *txs.Tx, nonInitDelTxs []*txs.Tx) string { - store, err := storeCreatorF() - if err != nil { - return fmt.Sprintf("unexpected error while creating staker store, err %v", err) - } - - signedValTx, err := txs.NewSigned(nonInitValTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) - } - - val, err := NewCurrentStaker(signedValTx.ID(), signedValTx.Unsigned.(txs.StakerTx), uint64(1000)) - if err != nil { - return err.Error() - } - - dels := make([]*Staker, 0, len(nonInitDelTxs)) - for _, nonInitDelTx := range nonInitDelTxs { - signedDelTx, err := txs.NewSigned(nonInitDelTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) - } - - del, err := NewCurrentStaker(signedDelTx.ID(), signedDelTx.Unsigned.(txs.StakerTx), uint64(1000)) - if err != nil { - return err.Error() - } - - dels = append(dels, del) - } - - // store validator - store.PutCurrentValidator(val) - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(val, retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - err = checkStakersContent(store, []*Staker{val}, current) - if err != nil { - return err.Error() - } - - // store delegators - for _, del := range dels { - cpy := *del - - // it's fine deleting unknown delegator - store.DeleteCurrentDelegator(&cpy) - - // finally store the 
delegator - store.PutCurrentDelegator(&cpy) - } - - // check no missing delegators by subnetID, nodeID - for _, del := range dels { - found := false - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for delIt.Next() { - if reflect.DeepEqual(delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } - } - - // check no extra delegator by subnetID, nodeID - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - for delIt.Next() { - found := false - for _, del := range dels { - if reflect.DeepEqual(delIt.Value(), del) { - found = true - break - } - } - if !found { - return fmt.Sprintf("found extra delegator %v", delIt.Value()) - } - } - delIt.Release() - - // check no missing delegators in the whole staker set - stakersSet := dels - stakersSet = append(stakersSet, val) - err = checkStakersContent(store, stakersSet, current) - if err != nil { - return err.Error() - } - - // delete delegators - for _, del := range dels { - cpy := *del - store.DeleteCurrentDelegator(&cpy) - - // check deleted delegator is not there anymore - delIt, err := store.GetCurrentDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in current delegators iterator creation, error %v", err) - } - - found := false - for delIt.Next() { - if reflect.DeepEqual(delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - if found { - return fmt.Sprintf("found deleted delegator %v", del) - } - } - - return "" - }, - stakerTxGenerator(ctx, - permissionlessValidator, - &subnetID, - &nodeID, - math.MaxUint64, - ), - gen.SliceOfN(10, - stakerTxGenerator(ctx, - permissionlessDelegator, - &subnetID, - &nodeID, - 1000, - ), - ), - )) - - properties.Property("add, delete and query pending delegators", prop.ForAll( - func(nonInitValTx *txs.Tx, nonInitDelTxs []*txs.Tx) string { - store, err := storeCreatorF() - if err != nil { - return fmt.Sprintf("unexpected error while creating staker store, err %v", err) - } - - signedValTx, err := txs.NewSigned(nonInitValTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) - } - - val, err := NewCurrentStaker(signedValTx.ID(), signedValTx.Unsigned.(txs.StakerTx), uint64(1000)) - if err != nil { - return err.Error() - } - - dels := make([]*Staker, 0, len(nonInitDelTxs)) - for _, nonInitDelTx := range nonInitDelTxs { - signedDelTx, err := txs.NewSigned(nonInitDelTx.Unsigned, txs.Codec, nil) - if err != nil { - panic(fmt.Errorf("failed signing tx in tx generator, %w", err)) - } - - del, err := NewCurrentStaker(signedDelTx.ID(), signedDelTx.Unsigned.(txs.StakerTx), uint64(1000)) - if err != nil { - return err.Error() - } - - dels = append(dels, del) - } - - // store validator - store.PutCurrentValidator(val) - retrievedValidator, err := store.GetCurrentValidator(val.SubnetID, val.NodeID) - if err != nil { - return fmt.Sprintf("expected no error, got %v", err) - } - if !reflect.DeepEqual(val, retrievedValidator) { - return fmt.Sprintf("wrong staker retrieved expected %v, got %v", &val, retrievedValidator) - } - - err = checkStakersContent(store, []*Staker{val}, current) - if err != nil { - return err.Error() - } - - // store delegators - for _, 
del := range dels { - cpy := *del - - // it's fine deleting unknown delegator - store.DeletePendingDelegator(&cpy) - - // finally store the delegator - store.PutPendingDelegator(&cpy) - } - - // check no missing delegators by subnetID, nodeID - for _, del := range dels { - found := false - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - for delIt.Next() { - if reflect.DeepEqual(delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - - if !found { - return fmt.Sprintf("missing delegator %v", del) - } - } - - // check no extra delegators by subnetID, nodeID - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - for delIt.Next() { - found := false - for _, del := range dels { - if reflect.DeepEqual(delIt.Value(), del) { - found = true - break - } - } - if !found { - return fmt.Sprintf("found extra delegator %v", delIt.Value()) - } - } - delIt.Release() - - // check no missing delegators in the whole staker set - err = checkStakersContent(store, dels, pending) - if err != nil { - return err.Error() - } - - // delete delegators - for _, del := range dels { - cpy := *del - store.DeletePendingDelegator(&cpy) - - // check deleted delegator is not there anymore - delIt, err := store.GetPendingDelegatorIterator(subnetID, nodeID) - if err != nil { - return fmt.Sprintf("unexpected failure in pending delegators iterator creation, error %v", err) - } - - found := false - for delIt.Next() { - if reflect.DeepEqual(delIt.Value(), del) { - found = true - break - } - } - delIt.Release() - if found { - return fmt.Sprintf("found deleted delegator %v", del) - } - } - - return "" - }, - stakerTxGenerator(ctx, - permissionlessValidator, - &subnetID, - &nodeID, - math.MaxUint64, - ), - gen.SliceOfN(10, - stakerTxGenerator(ctx, - permissionlessDelegator, - &subnetID, - &nodeID, - 1000, - ), - ), - )) - - return properties -} - -func buildDiffOnTopOfBaseState(trackedSubnets []ids.ID) (Diff, State, error) { - baseDB := versiondb.New(memdb.New()) - baseState, err := buildChainState(baseDB, trackedSubnets) - if err != nil { - return nil, nil, fmt.Errorf("unexpected error while creating chain base state, err %w", err) - } - - genesisID := baseState.GetLastAccepted() - versions := &versionsHolder{ - baseState: baseState, - } - diff, err := NewDiff(genesisID, versions) - if err != nil { - return nil, nil, fmt.Errorf("unexpected error while creating diff, err %w", err) - } - return diff, baseState, nil -} - -// [checkStakersContent] verifies whether store contains exactly the stakers specified in the list. -// stakers order does not matter. stakers slice gets consumed while checking. 
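The "consumed while checking" remark refers to the swap-remove idiom: each staker produced by the iterator is searched for in the remaining expected slice, swapped with the last element, and the slice is shrunk by one, so anything left over at the end is missing and any unmatched iterator value is an extra. A standalone sketch of just that idiom, with hypothetical names (the real helper follows):

// removeMatch drops the first element of xs equal to target by
// swapping it with the last element and shrinking the slice.
// Order is not preserved, which is fine when order does not matter.
func removeMatch(xs []int, target int) ([]int, bool) {
	for i, x := range xs {
		if x == target {
			xs[i] = xs[len(xs)-1]
			return xs[:len(xs)-1], true
		}
	}
	return xs, false
}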
-func checkStakersContent(store Stakers, stakers []*Staker, stakersType stakerStatus) error { - var ( - it StakerIterator - err error - ) - - switch stakersType { - case current: - it, err = store.GetCurrentStakerIterator() - case pending: - it, err = store.GetPendingStakerIterator() - default: - return errors.New("Unhandled stakers status") - } - if err != nil { - return fmt.Errorf("unexpected failure in staker iterator creation, error %w", err) - } - defer it.Release() - - if len(stakers) == 0 { - if it.Next() { - return fmt.Errorf("expected empty iterator, got at least element %v", it.Value()) - } - return nil - } - - for it.Next() { - var ( - staker = it.Value() - found = false - - retrievedStakerIdx = 0 - ) - - for idx, s := range stakers { - if reflect.DeepEqual(staker, s) { - retrievedStakerIdx = idx - found = true - } - } - if !found { - return fmt.Errorf("found extra staker %v", staker) - } - stakers[retrievedStakerIdx] = stakers[len(stakers)-1] // order does not matter - stakers = stakers[:len(stakers)-1] - } - - if len(stakers) != 0 { - return errors.New("missing stakers") - } - return nil -} From 30cb24ac0b19b87f9e990ea4216fd21893558d3b Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 15 Nov 2023 15:36:52 -0500 Subject: [PATCH 098/132] comments; typos --- vms/platformvm/state/merkle_state.go | 29 ++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index 01bd95922024..514255e78477 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -60,7 +60,7 @@ var ( merkleTxPrefix = []byte{0x04} merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) merkleUptimesPrefix = []byte{0x06} // locally measured uptimes - merkleWeightDiffPrefix = []byte{0x07} // non-merklelized validators weight diff. TODO: should we merklelize them? + merkleWeightDiffPrefix = []byte{0x07} // non-merkleized validators weight diff. TODO: should we merkleize them? merkleBlsKeyDiffPrefix = []byte{0x08} merkleRewardUtxosPrefix = []byte{0x09} @@ -286,6 +286,31 @@ func newMerkleState( }, nil } +// Stores global state in a merkle trie. This means that each state corresponds +// to a unique merkle root. Specifically, the following state is merkleized. +// - Delegatee Rewards +// - UTXOs +// - Current Supply +// - Subnet Creation Transactions +// - Subnet Owners +// - Subnet Transformation Transactions +// - Chain Creation Transactions +// - Chain time +// - Last Accepted Block ID +// - Current Staker Set +// - Pending Staker Set +// +// Changing any of the above state will cause the merkle root to change. 
+//
+// The following state is not merkleized:
+// - Database Initialization Status
+// - Blocks
+// - Block IDs
+// - Transactions (note some transactions are also stored merkleized)
+// - Uptimes
+// - Weight Diffs
+// - BLS Key Diffs
+// - Reward UTXOs
 type merkleState struct {
 	cfg     *config.Config
 	ctx     *snow.Context
@@ -295,7 +320,7 @@ type merkleState struct {
 	baseDB       *versiondb.Database
 	singletonDB  database.Database
 	baseMerkleDB database.Database
-	merkleDB     merkledb.MerkleDB // meklelized state
+	merkleDB     merkledb.MerkleDB // Stores merkleized state
 
 	// stakers section (missing Delegatee piece)
 	// TODO: Consider moving delegatee to UTXOs section

From f48fbfe37a7d56c4892982cc048f6fab5d40e914 Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Wed, 15 Nov 2023 16:31:53 -0500
Subject: [PATCH 099/132] re-order field setting in newMerkleState to be in same order as declarations

---
 vms/platformvm/state/merkle_state.go | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go
index 514255e78477..c508d2724629 100644
--- a/vms/platformvm/state/merkle_state.go
+++ b/vms/platformvm/state/merkle_state.go
@@ -227,14 +227,15 @@ func newMerkleState(
 	}
 
 	return &merkleState{
-		cfg:     cfg,
-		ctx:     ctx,
-		metrics: metrics,
-		rewards: rewards,
+		cfg:         cfg,
+		ctx:         ctx,
+		metrics:     metrics,
+		rewards:     rewards,
+
+		baseDB:      baseDB,
+		singletonDB: singletonDB,
 
 		baseMerkleDB: baseMerkleDB,
 		merkleDB:     merkleDB,
-		singletonDB:  singletonDB,
 
 		currentStakers: newBaseStakers(),
 		pendingStakers: newBaseStakers(),
@@ -259,10 +260,6 @@ func newMerkleState(
 		addedChains: make(map[ids.ID][]*txs.Tx),
 		chainCache:  chainCache,
 
-		addedTxs: make(map[ids.ID]*txAndStatus),
-		txCache:  txCache,
-		txDB:     txDB,
-
 		addedBlocks: make(map[ids.ID]block.Block),
 		blockCache:  blockCache,
 		blockDB:     blockDB,
@@ -271,6 +268,10 @@ func newMerkleState(
 		blockIDCache: blockIDCache,
 		blockIDDB:    blockIDsDB,
 
+		addedTxs: make(map[ids.ID]*txAndStatus),
+		txCache:  txCache,
+		txDB:     txDB,
+
 		indexedUTXOsDB: indexedUTXOsDB,
 
 		localUptimesCache: make(map[ids.NodeID]map[ids.ID]*uptimes),

From 6ab428ab46efc4c265850f961050594b31655fb7 Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Wed, 15 Nov 2023 16:56:33 -0500
Subject: [PATCH 100/132] add comments

---
 vms/platformvm/state/merkle_state_load_ops.go | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go
index a5e059f356aa..dbffe80a83ab 100644
--- a/vms/platformvm/state/merkle_state_load_ops.go
+++ b/vms/platformvm/state/merkle_state_load_ops.go
@@ -23,6 +23,8 @@ import (
 //
 var errNotYetImplemented = errors.New("NOT YET IMPLEMENTED")
 
+// If [ms] isn't initialized, initializes it with [genesis].
+// Then loads [ms] from disk.
 func (ms *merkleState) sync(genesis []byte) error {
 	shouldInit, err := ms.shouldInit()
 	if err != nil {
@@ -55,6 +57,7 @@ func (ms *merkleState) doneInit() error {
 	return ms.singletonDB.Put(initializedKey, nil)
 }
 
+// Creates a genesis from [genesisBytes] and initializes [ms] with it.
 func (ms *merkleState) init(genesisBytes []byte) error {
 	// Create the genesis block and save it as being accepted (We don't do
 	// genesisBlock.Accept() because then it'd look for genesisBlock's
@@ -80,9 +83,9 @@ func (ms *merkleState) init(genesisBytes []byte) error {
 	return ms.Commit()
 }
 
+// Loads the state from [genesisBlk] and [genesis] into [ms].
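Taken together, the comments added here document a standard initialize-once flow: sync consults a singleton key, runs init only on first startup (init ends by calling doneInit, which writes that key), and then loads state from disk unconditionally. A compressed sketch of that control flow, using hypothetical helper parameters rather than the real merkleState methods, before syncGenesis continues below:

// syncSketch mirrors the shape of merkleState.sync: initialize
// exactly once, then load from disk on every start.
func syncSketch(
	singletonDB database.Database,
	initializedKey []byte,
	init func() error, // stands in for merkleState.init
	load func() error, // stands in for merkleState.load
) error {
	initialized, err := singletonDB.Has(initializedKey)
	if err != nil {
		return err
	}
	if !initialized {
		// first startup: build state from genesis and mark it done
		if err := init(); err != nil {
			return err
		}
	}
	return load()
}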
 func (ms *merkleState) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error {
-	genesisBlkID := genesisBlk.ID()
-	ms.SetLastAccepted(genesisBlkID)
+	ms.SetLastAccepted(genesisBlk.ID())
 	ms.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0))
 	ms.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply)
 	ms.AddStatelessBlock(genesisBlk)

From 2f9dd382e05b6d87029fd6993c82b7ac3281178f Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Wed, 15 Nov 2023 17:18:01 -0500
Subject: [PATCH 101/132] comment nits

---
 vms/platformvm/state/merkle_state_load_ops.go | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go
index dbffe80a83ab..863f182f31b6 100644
--- a/vms/platformvm/state/merkle_state_load_ops.go
+++ b/vms/platformvm/state/merkle_state_load_ops.go
@@ -164,13 +164,15 @@ func (ms *merkleState) load(hasSynced bool) error {
 	)
 }
 
+// Loads the chain time and last accepted block ID from disk
+// and populates them in [ms].
 func (ms *merkleState) loadMerkleMetadata() error {
-	// load chainTime
+	// load chain time
 	chainTimeBytes, err := ms.merkleDB.Get(merkleChainTimeKey)
 	if err != nil {
 		return err
 	}
-	chainTime := time.Time{}
+	var chainTime time.Time
 	if err := chainTime.UnmarshalBinary(chainTimeBytes); err != nil {
 		return err
 	}
@@ -187,12 +189,13 @@ func (ms *merkleState) loadMerkleMetadata() error {
 	ms.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID
 	ms.SetLastAccepted(lastAcceptedBlkID)
 
-	// wen don't need to load supplies. Unlike chainTime and lastBlkID
-	// which have the persisted* attribute, we signal supplies have not
-	// been modified by having an empty map.
+	// We don't need to load supplies. Unlike chain time and last block ID,
+	// which have the persisted* attribute, we signify that a supply hasn't
+	// been modified by making it nil.
 	return nil
 }
 
+// Loads current stakers from disk and populates them in [ms].
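Note how loadMerkleMetadata leans on time.Time's built-in binary codec: the zero-valued chainTime is populated in place by UnmarshalBinary, under the assumption that the write path persisted the value produced by MarshalBinary. A small standard-library-only illustration of that round trip, before loadCurrentStakers continues below:

package example

import (
	"fmt"
	"time"
)

func main() {
	persisted := time.Unix(1700000000, 0) // what the writer side would store

	chainTimeBytes, err := persisted.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var chainTime time.Time // zero value, exactly as in loadMerkleMetadata
	if err := chainTime.UnmarshalBinary(chainTimeBytes); err != nil {
		panic(err)
	}

	fmt.Println(persisted.Equal(chainTime)) // true
}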
func (ms *merkleState) loadCurrentStakers() error { // TODO ABENEGIA: Check missing metadata ms.currentStakers = newBaseStakers() From 2359675416576e06d67c770ea1a9d76e93e158a1 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Thu, 16 Nov 2023 16:33:56 -0500 Subject: [PATCH 102/132] nit --- vms/platformvm/state/merkle_state.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index c508d2724629..f1ae6e28597f 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -1039,11 +1039,8 @@ func (ms *merkleState) ApplyValidatorWeightDiffs( return err } } - if err := diffIter.Error(); err != nil { - return err - } - return nil + return diffIter.Error() } func (ms *merkleState) ApplyValidatorPublicKeyDiffs( From 308f59bfb3840a3e649b0499e0bc13c30a6a0ac3 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 20 Nov 2023 10:51:33 -0500 Subject: [PATCH 103/132] fix merge --- vms/platformvm/state/merkle_state.go | 32 +++++++++---------- vms/platformvm/state/merkle_state_load_ops.go | 10 +++--- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go index f1ae6e28597f..c718003be4e6 100644 --- a/vms/platformvm/state/merkle_state.go +++ b/vms/platformvm/state/merkle_state.go @@ -84,7 +84,7 @@ func NewMerkleState( rawDB database.Database, genesisBytes []byte, metricsReg prometheus.Registerer, - cfg *config.Config, + validators validators.Manager, execCfg *config.ExecutionConfig, ctx *snow.Context, metrics metrics.Metrics, @@ -93,7 +93,7 @@ func NewMerkleState( res, err := newMerkleState( rawDB, metrics, - cfg, + validators, execCfg, ctx, metricsReg, @@ -115,7 +115,7 @@ func NewMerkleState( func newMerkleState( rawDB database.Database, metrics metrics.Metrics, - cfg *config.Config, + validators validators.Manager, execCfg *config.ExecutionConfig, ctx *snow.Context, metricsReg prometheus.Registerer, @@ -227,10 +227,10 @@ func newMerkleState( } return &merkleState{ - cfg: cfg, - ctx: ctx, - metrics: metrics, - rewards: rewards, + validators: validators, + ctx: ctx, + metrics: metrics, + rewards: rewards, baseDB: baseDB, singletonDB: singletonDB, @@ -313,10 +313,10 @@ func newMerkleState( // - BLS Key Diffs // - Reward UTXOs type merkleState struct { - cfg *config.Config - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator + validators validators.Manager + ctx *snow.Context + metrics metrics.Metrics + rewards reward.Calculator baseDB *versiondb.Database singletonDB database.Database @@ -1730,11 +1730,11 @@ func (ms *merkleState) updateValidatorSet( } if weightDiff.Decrease { - err = ms.cfg.Validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) + err = ms.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) } else { if validatorDiff.validatorStatus == added { staker := validatorDiff.validator - err = ms.cfg.Validators.AddStaker( + err = ms.validators.AddStaker( subnetID, nodeID, staker.PublicKey, @@ -1742,7 +1742,7 @@ func (ms *merkleState) updateValidatorSet( weightDiff.Amount, ) } else { - err = ms.cfg.Validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + err = ms.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) } } if err != nil { @@ -1750,8 +1750,8 @@ func (ms *merkleState) updateValidatorSet( } } - ms.metrics.SetLocalStake(ms.cfg.Validators.GetWeight(constants.PrimaryNetworkID, ms.ctx.NodeID)) - totalWeight, err := 
ms.cfg.Validators.TotalWeight(constants.PrimaryNetworkID) + ms.metrics.SetLocalStake(ms.validators.GetWeight(constants.PrimaryNetworkID, ms.ctx.NodeID)) + totalWeight, err := ms.validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("failed to get total weight: %w", err) } diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/merkle_state_load_ops.go index 863f182f31b6..c9c17a8ddbec 100644 --- a/vms/platformvm/state/merkle_state_load_ops.go +++ b/vms/platformvm/state/merkle_state_load_ops.go @@ -289,21 +289,21 @@ func (ms *merkleState) loadPendingStakers() error { // been called. func (ms *merkleState) initValidatorSets() error { for subnetID, validators := range ms.currentStakers.validators { - if ms.cfg.Validators.Count(subnetID) != 0 { + if ms.validators.Count(subnetID) != 0 { // Enforce the invariant that the validator set is empty here. return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) } for nodeID, validator := range validators { validatorStaker := validator.validator - if err := ms.cfg.Validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + if err := ms.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { return err } delegatorIterator := NewTreeIterator(validator.delegators) for delegatorIterator.Next() { delegatorStaker := delegatorIterator.Value() - if err := ms.cfg.Validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { + if err := ms.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { delegatorIterator.Release() return err } @@ -312,8 +312,8 @@ func (ms *merkleState) initValidatorSets() error { } } - ms.metrics.SetLocalStake(ms.cfg.Validators.GetWeight(constants.PrimaryNetworkID, ms.ctx.NodeID)) - totalWeight, err := ms.cfg.Validators.TotalWeight(constants.PrimaryNetworkID) + ms.metrics.SetLocalStake(ms.validators.GetWeight(constants.PrimaryNetworkID, ms.ctx.NodeID)) + totalWeight, err := ms.validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("failed to get total weight of primary network validators: %w", err) } From 60e72296fca97fac8095550650e0297313a95db3 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 22 Nov 2023 22:07:57 +0100 Subject: [PATCH 104/132] replaced state.State with state.MerkleState --- vms/platformvm/block/builder/helpers_test.go | 2 +- vms/platformvm/block/executor/helpers_test.go | 2 +- vms/platformvm/state/merkle_state.go | 1794 ---------- vms/platformvm/state/metadata_delegator.go | 25 - vms/platformvm/state/state.go | 2965 +++++++---------- ...rkle_state_helpers.go => state_helpers.go} | 0 ...le_state_test.go => state_helpers_test.go} | 0 ...le_state_load_ops.go => state_load_ops.go} | 124 +- vms/platformvm/state/state_test.go | 45 +- vms/platformvm/txs/executor/helpers_test.go | 2 +- vms/platformvm/vm.go | 2 +- vms/platformvm/vm_regression_test.go | 4 +- 12 files changed, 1307 insertions(+), 3658 deletions(-) delete mode 100644 vms/platformvm/state/merkle_state.go delete mode 100644 vms/platformvm/state/metadata_delegator.go rename vms/platformvm/state/{merkle_state_helpers.go => state_helpers.go} (100%) rename vms/platformvm/state/{merkle_state_test.go => state_helpers_test.go} (100%) rename vms/platformvm/state/{merkle_state_load_ops.go => state_load_ops.go} (66%) diff --git a/vms/platformvm/block/builder/helpers_test.go 
b/vms/platformvm/block/builder/helpers_test.go index ec677f35e463..84778add2864 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -246,7 +246,7 @@ func defaultState( execCfg, _ := config.GetExecutionConfig([]byte(`{}`)) genesisBytes := buildGenesisTest(t, ctx) - state, err := state.NewMerkleState( + state, err := state.New( db, genesisBytes, prometheus.NewRegistry(), diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index e27676976cab..ff0aa13a2ea1 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -276,7 +276,7 @@ func defaultState( ) state.State { genesisBytes := buildGenesisTest(ctx) execCfg, _ := config.GetExecutionConfig([]byte(`{}`)) - state, err := state.NewMerkleState( + state, err := state.New( db, genesisBytes, prometheus.NewRegistry(), diff --git a/vms/platformvm/state/merkle_state.go b/vms/platformvm/state/merkle_state.go deleted file mode 100644 index c718003be4e6..000000000000 --- a/vms/platformvm/state/merkle_state.go +++ /dev/null @@ -1,1794 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "bytes" - "context" - "fmt" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/cache/metercacher" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/linkeddb" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/database/versiondb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/trace" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/block" - "github.com/ava-labs/avalanchego/vms/platformvm/config" - "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/metrics" - "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/status" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/x/merkledb" -) - -const ( - HistoryLength = uint(256) - - valueNodeCacheSize = 512 * units.MiB - intermediateNodeCacheSize = 512 * units.MiB - utxoCacheSize = 8192 // from avax/utxo_state.go -) - -var ( - _ State = (*merkleState)(nil) - - merkleStatePrefix = []byte{0x00} - merkleSingletonPrefix = []byte{0x01} - merkleBlockPrefix = []byte{0x02} - merkleBlockIDsPrefix = []byte{0x03} - merkleTxPrefix = []byte{0x04} - merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) - merkleUptimesPrefix = []byte{0x06} // locally measured uptimes - merkleWeightDiffPrefix = []byte{0x07} // non-merkleized validators weight diff. TODO: should we merkleize them? 
- merkleBlsKeyDiffPrefix = []byte{0x08} - merkleRewardUtxosPrefix = []byte{0x09} - - // merkle db sections - metadataSectionPrefix = byte(0x00) - merkleChainTimeKey = []byte{metadataSectionPrefix, 0x00} - merkleLastAcceptedBlkIDKey = []byte{metadataSectionPrefix, 0x01} - merkleSuppliesPrefix = []byte{metadataSectionPrefix, 0x02} - - permissionedSubnetSectionPrefix = []byte{0x01} - elasticSubnetSectionPrefix = []byte{0x02} - chainsSectionPrefix = []byte{0x03} - utxosSectionPrefix = []byte{0x04} - currentStakersSectionPrefix = []byte{0x05} - pendingStakersSectionPrefix = []byte{0x06} - delegateeRewardsPrefix = []byte{0x07} - subnetOwnersPrefix = []byte{0x08} -) - -func NewMerkleState( - rawDB database.Database, - genesisBytes []byte, - metricsReg prometheus.Registerer, - validators validators.Manager, - execCfg *config.ExecutionConfig, - ctx *snow.Context, - metrics metrics.Metrics, - rewards reward.Calculator, -) (State, error) { - res, err := newMerkleState( - rawDB, - metrics, - validators, - execCfg, - ctx, - metricsReg, - rewards, - ) - if err != nil { - return nil, err - } - - if err := res.sync(genesisBytes); err != nil { - // Drop any errors on close to return the first error - _ = res.Close() - return nil, err - } - - return res, nil -} - -func newMerkleState( - rawDB database.Database, - metrics metrics.Metrics, - validators validators.Manager, - execCfg *config.ExecutionConfig, - ctx *snow.Context, - metricsReg prometheus.Registerer, - rewards reward.Calculator, -) (*merkleState, error) { - var ( - baseDB = versiondb.New(rawDB) - baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) - singletonDB = prefixdb.New(merkleSingletonPrefix, baseDB) - blockDB = prefixdb.New(merkleBlockPrefix, baseDB) - blockIDsDB = prefixdb.New(merkleBlockIDsPrefix, baseDB) - txDB = prefixdb.New(merkleTxPrefix, baseDB) - indexedUTXOsDB = prefixdb.New(merkleIndexUTXOsPrefix, baseDB) - localUptimesDB = prefixdb.New(merkleUptimesPrefix, baseDB) - flatValidatorWeightDiffsDB = prefixdb.New(merkleWeightDiffPrefix, baseDB) - flatValidatorPublicKeyDiffsDB = prefixdb.New(merkleBlsKeyDiffPrefix, baseDB) - rewardUTXOsDB = prefixdb.New(merkleRewardUtxosPrefix, baseDB) - ) - - noOpTracer, err := trace.New(trace.Config{Enabled: false}) - if err != nil { - return nil, fmt.Errorf("failed creating noOpTraces: %w", err) - } - - merkleDB, err := merkledb.New(context.TODO(), baseMerkleDB, merkledb.Config{ - BranchFactor: merkledb.BranchFactor16, - HistoryLength: HistoryLength, - ValueNodeCacheSize: valueNodeCacheSize, - IntermediateNodeCacheSize: intermediateNodeCacheSize, - Reg: prometheus.NewRegistry(), - Tracer: noOpTracer, - }) - if err != nil { - return nil, fmt.Errorf("failed creating merkleDB: %w", err) - } - - rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( - "reward_utxos_cache", - metricsReg, - &cache.LRU[ids.ID, []*avax.UTXO]{Size: execCfg.RewardUTXOsCacheSize}, - ) - if err != nil { - return nil, err - } - - suppliesCache, err := metercacher.New[ids.ID, *uint64]( - "supply_cache", - metricsReg, - &cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize}, - ) - if err != nil { - return nil, err - } - - subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize]( - "subnet_owner_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, fxOwnerAndSize](execCfg.FxOwnerCacheSize, func(_ ids.ID, f fxOwnerAndSize) int { - return ids.IDLen + f.size - }), - ) - if err != nil { - return nil, err - } - - transformedSubnetCache, err := metercacher.New( - "transformed_subnet_cache", - metricsReg, - 
cache.NewSizedLRU[ids.ID, *txs.Tx](execCfg.TransformedSubnetTxCacheSize, txSize), - ) - if err != nil { - return nil, err - } - - chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( - "chain_cache", - metricsReg, - &cache.LRU[ids.ID, []*txs.Tx]{Size: execCfg.ChainCacheSize}, - ) - if err != nil { - return nil, err - } - - blockCache, err := metercacher.New[ids.ID, block.Block]( - "block_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, block.Block](execCfg.BlockCacheSize, blockSize), - ) - if err != nil { - return nil, err - } - - blockIDCache, err := metercacher.New[uint64, ids.ID]( - "block_id_cache", - metricsReg, - &cache.LRU[uint64, ids.ID]{Size: execCfg.BlockIDCacheSize}, - ) - if err != nil { - return nil, err - } - - txCache, err := metercacher.New( - "tx_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize), - ) - if err != nil { - return nil, err - } - - return &merkleState{ - validators: validators, - ctx: ctx, - metrics: metrics, - rewards: rewards, - - baseDB: baseDB, - singletonDB: singletonDB, - baseMerkleDB: baseMerkleDB, - merkleDB: merkleDB, - - currentStakers: newBaseStakers(), - pendingStakers: newBaseStakers(), - - delegateeRewardCache: make(map[ids.NodeID]map[ids.ID]uint64), - modifiedDelegateeReward: make(map[ids.NodeID]set.Set[ids.ID]), - - modifiedUTXOs: make(map[ids.ID]*avax.UTXO), - utxoCache: &cache.LRU[ids.ID, *avax.UTXO]{Size: utxoCacheSize}, - - modifiedSupplies: make(map[ids.ID]uint64), - suppliesCache: suppliesCache, - - subnetOwners: make(map[ids.ID]fx.Owner), - subnetOwnerCache: subnetOwnerCache, - - addedPermissionedSubnets: make([]*txs.Tx, 0), - permissionedSubnetCache: nil, // created first time GetSubnets is called - addedElasticSubnets: make(map[ids.ID]*txs.Tx), - elasticSubnetCache: transformedSubnetCache, - - addedChains: make(map[ids.ID][]*txs.Tx), - chainCache: chainCache, - - addedBlocks: make(map[ids.ID]block.Block), - blockCache: blockCache, - blockDB: blockDB, - - addedBlockIDs: make(map[uint64]ids.ID), - blockIDCache: blockIDCache, - blockIDDB: blockIDsDB, - - addedTxs: make(map[ids.ID]*txAndStatus), - txCache: txCache, - txDB: txDB, - - indexedUTXOsDB: indexedUTXOsDB, - - localUptimesCache: make(map[ids.NodeID]map[ids.ID]*uptimes), - modifiedLocalUptimes: make(map[ids.NodeID]set.Set[ids.ID]), - localUptimesDB: localUptimesDB, - - flatValidatorWeightDiffsDB: flatValidatorWeightDiffsDB, - flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, - - addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), - rewardUTXOsCache: rewardUTXOsCache, - rewardUTXOsDB: rewardUTXOsDB, - }, nil -} - -// Stores global state in a merkle trie. This means that each state corresponds -// to a unique merkle root. Specifically, the following state is merkleized. -// - Delegatee Rewards -// - UTXOs -// - Current Supply -// - Subnet Creation Transactions -// - Subnet Owners -// - Subnet Transformation Transactions -// - Chain Creation Transactions -// - Chain time -// - Last Accepted Block ID -// - Current Staker Set -// - Pending Staker Set -// -// Changing any of the above state will cause the merkle root to change. 
-// -// The following state is not merkleized: -// - Database Initialization Status -// - Blocks -// - Block IDs -// - Transactions (note some transactions are also stored merkleized) -// - Uptimes -// - Weight Diffs -// - BLS Key Diffs -// - Reward UTXOs -type merkleState struct { - validators validators.Manager - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator - - baseDB *versiondb.Database - singletonDB database.Database - baseMerkleDB database.Database - merkleDB merkledb.MerkleDB // Stores merkleized state - - // stakers section (missing Delegatee piece) - // TODO: Consider moving delegatee to UTXOs section - currentStakers *baseStakers - pendingStakers *baseStakers - - delegateeRewardCache map[ids.NodeID]map[ids.ID]uint64 - modifiedDelegateeReward map[ids.NodeID]set.Set[ids.ID] - - // UTXOs section - modifiedUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO - utxoCache cache.Cacher[ids.ID, *avax.UTXO] // UTXO ID -> *UTXO. If the *UTXO is nil the UTXO doesn't exist - - // Metadata section - chainTime, latestComittedChainTime time.Time - lastAcceptedBlkID, latestCommittedLastAcceptedBlkID ids.ID - lastAcceptedHeight uint64 // TODO: Should this be written to state?? - modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply - suppliesCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database - - // Subnets section - // Subnet ID --> Owner of the subnet - subnetOwners map[ids.ID]fx.Owner - subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database - - addedPermissionedSubnets []*txs.Tx // added SubnetTxs, waiting to be committed - permissionedSubnetCache []*txs.Tx // nil if the subnets haven't been loaded - addedElasticSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx - elasticSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database - - // Chains section - addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet - chainCache cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx - - // Blocks section - // Note: addedBlocks is a list because multiple blocks can be committed at one (proposal + accepted option) - addedBlocks map[ids.ID]block.Block // map of blockID -> Block. - blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database - blockDB database.Database - - addedBlockIDs map[uint64]ids.ID // map of height -> blockID - blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database - blockIDDB database.Database - - // Txs section - // FIND a way to reduce use of these. No use in verification of addedTxs - // a limited windows to support APIs - addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} - txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. 
If the entry is nil, it isn't in the database - txDB database.Database - - indexedUTXOsDB database.Database - - localUptimesCache map[ids.NodeID]map[ids.ID]*uptimes // vdrID -> subnetID -> metadata - modifiedLocalUptimes map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs - localUptimesDB database.Database - - flatValidatorWeightDiffsDB database.Database - flatValidatorPublicKeyDiffsDB database.Database - - // Reward UTXOs section - addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO - rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO - rewardUTXOsDB database.Database -} - -// STAKERS section -func (ms *merkleState) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { - return ms.currentStakers.GetValidator(subnetID, nodeID) -} - -func (ms *merkleState) PutCurrentValidator(staker *Staker) { - ms.currentStakers.PutValidator(staker) - - // make sure that each new validator has an uptime entry - // and a delegatee reward entry. MerkleState implementations - // of SetUptime and SetDelegateeReward must not err - err := ms.SetUptime(staker.NodeID, staker.SubnetID, 0 /*duration*/, staker.StartTime) - if err != nil { - panic(err) - } - err = ms.SetDelegateeReward(staker.SubnetID, staker.NodeID, 0) - if err != nil { - panic(err) - } -} - -func (ms *merkleState) DeleteCurrentValidator(staker *Staker) { - ms.currentStakers.DeleteValidator(staker) -} - -func (ms *merkleState) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { - return ms.currentStakers.GetDelegatorIterator(subnetID, nodeID), nil -} - -func (ms *merkleState) PutCurrentDelegator(staker *Staker) { - ms.currentStakers.PutDelegator(staker) -} - -func (ms *merkleState) DeleteCurrentDelegator(staker *Staker) { - ms.currentStakers.DeleteDelegator(staker) -} - -func (ms *merkleState) GetCurrentStakerIterator() (StakerIterator, error) { - return ms.currentStakers.GetStakerIterator(), nil -} - -func (ms *merkleState) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { - return ms.pendingStakers.GetValidator(subnetID, nodeID) -} - -func (ms *merkleState) PutPendingValidator(staker *Staker) { - ms.pendingStakers.PutValidator(staker) -} - -func (ms *merkleState) DeletePendingValidator(staker *Staker) { - ms.pendingStakers.DeleteValidator(staker) -} - -func (ms *merkleState) GetPendingDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { - return ms.pendingStakers.GetDelegatorIterator(subnetID, nodeID), nil -} - -func (ms *merkleState) PutPendingDelegator(staker *Staker) { - ms.pendingStakers.PutDelegator(staker) -} - -func (ms *merkleState) DeletePendingDelegator(staker *Staker) { - ms.pendingStakers.DeleteDelegator(staker) -} - -func (ms *merkleState) GetPendingStakerIterator() (StakerIterator, error) { - return ms.pendingStakers.GetStakerIterator(), nil -} - -func (ms *merkleState) GetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID) (uint64, error) { - nodeDelegateeRewards, exists := ms.delegateeRewardCache[vdrID] - if exists { - delegateeReward, exists := nodeDelegateeRewards[subnetID] - if exists { - return delegateeReward, nil - } - } - - // try loading from the db - key := merkleDelegateeRewardsKey(vdrID, subnetID) - amountBytes, err := ms.merkleDB.Get(key) - if err != nil { - return 0, err - } - delegateeReward, err := database.ParseUInt64(amountBytes) - if err != nil { - return 0, err - } - - if _, found := ms.delegateeRewardCache[vdrID]; !found { - ms.delegateeRewardCache[vdrID] = 
make(map[ids.ID]uint64) - } - ms.delegateeRewardCache[vdrID][subnetID] = delegateeReward - return delegateeReward, nil -} - -func (ms *merkleState) SetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID, amount uint64) error { - nodeDelegateeRewards, exists := ms.delegateeRewardCache[vdrID] - if !exists { - nodeDelegateeRewards = make(map[ids.ID]uint64) - ms.delegateeRewardCache[vdrID] = nodeDelegateeRewards - } - nodeDelegateeRewards[subnetID] = amount - - // track diff - updatedDelegateeRewards, ok := ms.modifiedDelegateeReward[vdrID] - if !ok { - updatedDelegateeRewards = set.Set[ids.ID]{} - ms.modifiedDelegateeReward[vdrID] = updatedDelegateeRewards - } - updatedDelegateeRewards.Add(subnetID) - return nil -} - -// UTXOs section -func (ms *merkleState) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { - if utxo, exists := ms.modifiedUTXOs[utxoID]; exists { - if utxo == nil { - return nil, database.ErrNotFound - } - return utxo, nil - } - if utxo, found := ms.utxoCache.Get(utxoID); found { - if utxo == nil { - return nil, database.ErrNotFound - } - return utxo, nil - } - - key := merkleUtxoIDKey(utxoID) - - switch bytes, err := ms.merkleDB.Get(key); err { - case nil: - utxo := &avax.UTXO{} - if _, err := txs.GenesisCodec.Unmarshal(bytes, utxo); err != nil { - return nil, err - } - ms.utxoCache.Put(utxoID, utxo) - return utxo, nil - - case database.ErrNotFound: - ms.utxoCache.Put(utxoID, nil) - return nil, database.ErrNotFound - - default: - return nil, err - } -} - -func (ms *merkleState) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { - var ( - prefix = slices.Clone(addr) - key = merkleUtxoIndexKey(addr, start) - ) - - iter := ms.indexedUTXOsDB.NewIteratorWithStartAndPrefix(key, prefix) - defer iter.Release() - - utxoIDs := []ids.ID(nil) - for len(utxoIDs) < limit && iter.Next() { - itAddr, utxoID := splitUtxoIndexKey(iter.Key()) - if !bytes.Equal(itAddr, addr) { - break - } - if utxoID == start { - continue - } - - start = ids.Empty - utxoIDs = append(utxoIDs, utxoID) - } - return utxoIDs, iter.Error() -} - -func (ms *merkleState) AddUTXO(utxo *avax.UTXO) { - ms.modifiedUTXOs[utxo.InputID()] = utxo -} - -func (ms *merkleState) DeleteUTXO(utxoID ids.ID) { - ms.modifiedUTXOs[utxoID] = nil -} - -// METADATA Section -func (ms *merkleState) GetTimestamp() time.Time { - return ms.chainTime -} - -func (ms *merkleState) SetTimestamp(tm time.Time) { - ms.chainTime = tm -} - -func (ms *merkleState) GetLastAccepted() ids.ID { - return ms.lastAcceptedBlkID -} - -func (ms *merkleState) SetLastAccepted(lastAccepted ids.ID) { - ms.lastAcceptedBlkID = lastAccepted -} - -func (ms *merkleState) SetHeight(height uint64) { - ms.lastAcceptedHeight = height -} - -func (ms *merkleState) GetCurrentSupply(subnetID ids.ID) (uint64, error) { - supply, ok := ms.modifiedSupplies[subnetID] - if ok { - return supply, nil - } - cachedSupply, ok := ms.suppliesCache.Get(subnetID) - if ok { - if cachedSupply == nil { - return 0, database.ErrNotFound - } - return *cachedSupply, nil - } - - key := merkleSuppliesKey(subnetID) - - switch supplyBytes, err := ms.merkleDB.Get(key); err { - case nil: - supply, err := database.ParseUInt64(supplyBytes) - if err != nil { - return 0, fmt.Errorf("failed parsing supply: %w", err) - } - ms.suppliesCache.Put(subnetID, &supply) - return supply, nil - - case database.ErrNotFound: - ms.suppliesCache.Put(subnetID, nil) - return 0, database.ErrNotFound - - default: - return 0, err - } -} - -func (ms *merkleState) SetCurrentSupply(subnetID ids.ID, cs uint64) { - 
ms.modifiedSupplies[subnetID] = cs -} - -// SUBNETS Section -func (ms *merkleState) GetSubnets() ([]*txs.Tx, error) { - // Note: we want all subnets, so we can't rely on addedPermissionedSubnets alone; - // it only holds the subnets added since the last commit - if ms.permissionedSubnetCache != nil { - return ms.permissionedSubnetCache, nil - } - - subnets := make([]*txs.Tx, 0) - subnetDBIt := ms.merkleDB.NewIteratorWithPrefix(permissionedSubnetSectionPrefix) - defer subnetDBIt.Release() - - for subnetDBIt.Next() { - subnetTxBytes := subnetDBIt.Value() - subnetTx, err := txs.Parse(txs.GenesisCodec, subnetTxBytes) - if err != nil { - return nil, err - } - subnets = append(subnets, subnetTx) - } - if err := subnetDBIt.Error(); err != nil { - return nil, err - } - subnets = append(subnets, ms.addedPermissionedSubnets...) - ms.permissionedSubnetCache = subnets - return subnets, nil -} - -func (ms *merkleState) AddSubnet(createSubnetTx *txs.Tx) { - ms.addedPermissionedSubnets = append(ms.addedPermissionedSubnets, createSubnetTx) -} - -func (ms *merkleState) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { - if owner, exists := ms.subnetOwners[subnetID]; exists { - return owner, nil - } - - if ownerAndSize, cached := ms.subnetOwnerCache.Get(subnetID); cached { - if ownerAndSize.owner == nil { - return nil, database.ErrNotFound - } - return ownerAndSize.owner, nil - } - - subnetIDKey := merkleSubnetOwnersKey(subnetID) - ownerBytes, err := ms.merkleDB.Get(subnetIDKey) - if err == nil { - var owner fx.Owner - if _, err := block.GenesisCodec.Unmarshal(ownerBytes, &owner); err != nil { - return nil, err - } - ms.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ - owner: owner, - size: len(ownerBytes), - }) - return owner, nil - } - if err != database.ErrNotFound { - return nil, err - } - - subnetIntf, _, err := ms.GetTx(subnetID) - if err != nil { - if err == database.ErrNotFound { - ms.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{}) - } - return nil, err - } - - subnet, ok := subnetIntf.Unsigned.(*txs.CreateSubnetTx) - if !ok { - return nil, fmt.Errorf("%q %w", subnetID, errIsNotSubnet) - } - - ms.SetSubnetOwner(subnetID, subnet.Owner) - return subnet.Owner, nil -} - -func (ms *merkleState) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { - ms.subnetOwners[subnetID] = owner -} - -func (ms *merkleState) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { - if tx, exists := ms.addedElasticSubnets[subnetID]; exists { - return tx, nil - } - - if tx, cached := ms.elasticSubnetCache.Get(subnetID); cached { - if tx == nil { - return nil, database.ErrNotFound - } - return tx, nil - } - - key := merkleElasticSubnetKey(subnetID) - transformSubnetTxBytes, err := ms.merkleDB.Get(key) - switch err { - case nil: - transformSubnetTx, err := txs.Parse(txs.GenesisCodec, transformSubnetTxBytes) - if err != nil { - return nil, err - } - ms.elasticSubnetCache.Put(subnetID, transformSubnetTx) - return transformSubnetTx, nil - - case database.ErrNotFound: - ms.elasticSubnetCache.Put(subnetID, nil) - return nil, database.ErrNotFound - - default: - return nil, err - } -} - -func (ms *merkleState) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { - transformSubnetTx := transformSubnetTxIntf.Unsigned.(*txs.TransformSubnetTx) - ms.addedElasticSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf -} - -// CHAINS Section -func (ms *merkleState) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { - if chains, cached := ms.chainCache.Get(subnetID); cached { - return chains, nil - } - chains := make([]*txs.Tx, 0) - - prefix := merkleChainPrefix(subnetID) - - 
chainDBIt := ms.merkleDB.NewIteratorWithPrefix(prefix) - defer chainDBIt.Release() - for chainDBIt.Next() { - chainTxBytes := chainDBIt.Value() - chainTx, err := txs.Parse(txs.GenesisCodec, chainTxBytes) - if err != nil { - return nil, err - } - chains = append(chains, chainTx) - } - if err := chainDBIt.Error(); err != nil { - return nil, err - } - chains = append(chains, ms.addedChains[subnetID]...) - ms.chainCache.Put(subnetID, chains) - return chains, nil -} - -func (ms *merkleState) AddChain(createChainTxIntf *txs.Tx) { - createChainTx := createChainTxIntf.Unsigned.(*txs.CreateChainTx) - subnetID := createChainTx.SubnetID - - ms.addedChains[subnetID] = append(ms.addedChains[subnetID], createChainTxIntf) -} - -// TXs Section -func (ms *merkleState) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { - if tx, exists := ms.addedTxs[txID]; exists { - return tx.tx, tx.status, nil - } - if tx, cached := ms.txCache.Get(txID); cached { - if tx == nil { - return nil, status.Unknown, database.ErrNotFound - } - return tx.tx, tx.status, nil - } - - txBytes, err := ms.txDB.Get(txID[:]) - switch err { - case nil: - stx := txBytesAndStatus{} - if _, err := txs.GenesisCodec.Unmarshal(txBytes, &stx); err != nil { - return nil, status.Unknown, err - } - - tx, err := txs.Parse(txs.GenesisCodec, stx.Tx) - if err != nil { - return nil, status.Unknown, err - } - - ptx := &txAndStatus{ - tx: tx, - status: stx.Status, - } - - ms.txCache.Put(txID, ptx) - return ptx.tx, ptx.status, nil - - case database.ErrNotFound: - ms.txCache.Put(txID, nil) - return nil, status.Unknown, database.ErrNotFound - - default: - return nil, status.Unknown, err - } -} - -func (ms *merkleState) AddTx(tx *txs.Tx, status status.Status) { - ms.addedTxs[tx.ID()] = &txAndStatus{ - tx: tx, - status: status, - } -} - -// BLOCKs Section -func (ms *merkleState) GetStatelessBlock(blockID ids.ID) (block.Block, error) { - if blk, exists := ms.addedBlocks[blockID]; exists { - return blk, nil - } - - if blk, cached := ms.blockCache.Get(blockID); cached { - if blk == nil { - return nil, database.ErrNotFound - } - - return blk, nil - } - - blkBytes, err := ms.blockDB.Get(blockID[:]) - switch err { - case nil: - // Note: stored blocks are verified, so it's safe to unmarshal them with GenesisCodec - blk, err := block.Parse(block.GenesisCodec, blkBytes) - if err != nil { - return nil, err - } - - ms.blockCache.Put(blockID, blk) - return blk, nil - - case database.ErrNotFound: - ms.blockCache.Put(blockID, nil) - return nil, database.ErrNotFound - - default: - return nil, err - } -} - -func (ms *merkleState) AddStatelessBlock(block block.Block) { - ms.addedBlocks[block.ID()] = block -} - -func (ms *merkleState) GetBlockIDAtHeight(height uint64) (ids.ID, error) { - if blkID, exists := ms.addedBlockIDs[height]; exists { - return blkID, nil - } - if blkID, cached := ms.blockIDCache.Get(height); cached { - if blkID == ids.Empty { - return ids.Empty, database.ErrNotFound - } - - return blkID, nil - } - - heightKey := database.PackUInt64(height) - - blkID, err := database.GetID(ms.blockIDDB, heightKey) - if err == database.ErrNotFound { - ms.blockIDCache.Put(height, ids.Empty) - return ids.Empty, database.ErrNotFound - } - if err != nil { - return ids.Empty, err - } - - ms.blockIDCache.Put(height, blkID) - return blkID, nil -} - -func (*merkleState) ShouldPrune() (bool, error) { - return false, nil // Nothing to do -} - -func (*merkleState) PruneAndIndex(sync.Locker, logging.Logger) error { - return nil // Nothing to do -} - -// UPTIMES SECTION -func (ms 
*merkleState) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) { - nodeUptimes, exists := ms.localUptimesCache[vdrID] - if exists { - uptime, exists := nodeUptimes[subnetID] - if exists { - return uptime.Duration, uptime.lastUpdated, nil - } - } - - // try loading from DB - key := merkleLocalUptimesKey(vdrID, subnetID) - uptimeBytes, err := ms.localUptimesDB.Get(key) - switch err { - case nil: - upTm := &uptimes{} - if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, upTm); err != nil { - return 0, time.Time{}, err - } - upTm.lastUpdated = time.Unix(int64(upTm.LastUpdated), 0) - if _, found := ms.localUptimesCache[vdrID]; !found { - // don't clobber uptimes already cached for other subnets of this node - ms.localUptimesCache[vdrID] = make(map[ids.ID]*uptimes) - } - ms.localUptimesCache[vdrID][subnetID] = upTm - return upTm.Duration, upTm.lastUpdated, nil - - case database.ErrNotFound: - // no local data for this staker uptime - return 0, time.Time{}, database.ErrNotFound - default: - return 0, time.Time{}, err - } -} - -func (ms *merkleState) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { - nodeUptimes, exists := ms.localUptimesCache[vdrID] - if !exists { - nodeUptimes = make(map[ids.ID]*uptimes) - ms.localUptimesCache[vdrID] = nodeUptimes - } - - nodeUptimes[subnetID] = &uptimes{ - Duration: upDuration, - LastUpdated: uint64(lastUpdated.Unix()), - lastUpdated: lastUpdated, - } - - // track diff - updatedNodeUptimes, ok := ms.modifiedLocalUptimes[vdrID] - if !ok { - updatedNodeUptimes = set.Set[ids.ID]{} - ms.modifiedLocalUptimes[vdrID] = updatedNodeUptimes - } - updatedNodeUptimes.Add(subnetID) - return nil -} - -func (ms *merkleState) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { - staker, err := ms.GetCurrentValidator(subnetID, nodeID) - if err != nil { - return time.Time{}, err - } - return staker.StartTime, nil -} - -// REWARD UTXOs SECTION -func (ms *merkleState) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { - if utxos, exists := ms.addedRewardUTXOs[txID]; exists { - return utxos, nil - } - if utxos, exists := ms.rewardUTXOsCache.Get(txID); exists { - return utxos, nil - } - - rawTxDB := prefixdb.New(txID[:], ms.rewardUTXOsDB) - txDB := linkeddb.NewDefault(rawTxDB) - it := txDB.NewIterator() - defer it.Release() - - utxos := []*avax.UTXO(nil) - for it.Next() { - utxo := &avax.UTXO{} - if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil { - return nil, err - } - utxos = append(utxos, utxo) - } - if err := it.Error(); err != nil { - return nil, err - } - - ms.rewardUTXOsCache.Put(txID, utxos) - return utxos, nil -} - -func (ms *merkleState) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { - ms.addedRewardUTXOs[txID] = append(ms.addedRewardUTXOs[txID], utxo) -} - -// VALIDATORS Section -func (ms *merkleState) ApplyValidatorWeightDiffs( - ctx context.Context, - validators map[ids.NodeID]*validators.GetValidatorOutput, - startHeight uint64, - endHeight uint64, - subnetID ids.ID, -) error { - diffIter := ms.flatValidatorWeightDiffsDB.NewIteratorWithStartAndPrefix( - marshalStartDiffKey(subnetID, startHeight), - subnetID[:], - ) - defer diffIter.Release() - - for diffIter.Next() { - if err := ctx.Err(); err != nil { - return err - } - - _, parsedHeight, nodeID, err := unmarshalDiffKey(diffIter.Key()) - if err != nil { - return err - } - // If the parsedHeight is less than our target endHeight, then we have - // fully processed the diffs from startHeight through endHeight. 
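// [Editor's aside, not part of the original patch] The early exit below is
// only correct if the iterator visits heights in descending order, from
// startHeight down toward endHeight. One plausible key layout (the actual one
// is defined by marshalDiffKey, which is outside this hunk) stores the bitwise
// complement of the height so that larger heights sort first:
//
//	// key = subnetID ++ bigEndian(math.MaxUint64 - height) ++ nodeID
//	// height=100 -> ...ff9b..., height=99 -> ...ff9c..., so 100 precedes 99
//
// Under that assumption, the first key whose parsedHeight drops below
// endHeight proves every diff in (endHeight, startHeight] has been visited.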
- if parsedHeight < endHeight { - return diffIter.Error() - } - - weightDiff, err := unmarshalWeightDiff(diffIter.Value()) - if err != nil { - return err - } - - if err := applyWeightDiff(validators, nodeID, weightDiff); err != nil { - return err - } - } - - return diffIter.Error() -} - -func (ms *merkleState) ApplyValidatorPublicKeyDiffs( - ctx context.Context, - validators map[ids.NodeID]*validators.GetValidatorOutput, - startHeight uint64, - endHeight uint64, -) error { - diffIter := ms.flatValidatorPublicKeyDiffsDB.NewIteratorWithStartAndPrefix( - marshalStartDiffKey(constants.PrimaryNetworkID, startHeight), - constants.PrimaryNetworkID[:], - ) - defer diffIter.Release() - - for diffIter.Next() { - if err := ctx.Err(); err != nil { - return err - } - - _, parsedHeight, nodeID, err := unmarshalDiffKey(diffIter.Key()) - if err != nil { - return err - } - // If the parsedHeight is less than our target endHeight, then we have - // fully processed the diffs from startHeight through endHeight. - if parsedHeight < endHeight { - break - } - - vdr, ok := validators[nodeID] - if !ok { - continue - } - - pkBytes := diffIter.Value() - if len(pkBytes) == 0 { - vdr.PublicKey = nil - continue - } - - vdr.PublicKey = new(bls.PublicKey).Deserialize(pkBytes) - } - return diffIter.Error() -} - -// DB Operations -func (ms *merkleState) Abort() { - ms.baseDB.Abort() -} - -func (ms *merkleState) Commit() error { - defer ms.Abort() - batch, err := ms.CommitBatch() - if err != nil { - return err - } - return batch.Write() -} - -func (ms *merkleState) CommitBatch() (database.Batch, error) { - // updateValidators is set to true here so that the validator manager is - // kept up to date with the last accepted state. - if err := ms.write(true /*updateValidators*/, ms.lastAcceptedHeight); err != nil { - return nil, err - } - return ms.baseDB.CommitBatch() -} - -func (*merkleState) Checksum() ids.ID { - return ids.Empty -} - -func (ms *merkleState) Close() error { - return utils.Err( - ms.flatValidatorWeightDiffsDB.Close(), - ms.flatValidatorPublicKeyDiffsDB.Close(), - ms.localUptimesDB.Close(), - ms.indexedUTXOsDB.Close(), - ms.txDB.Close(), - ms.blockDB.Close(), - ms.blockIDDB.Close(), - ms.merkleDB.Close(), - ms.baseMerkleDB.Close(), - ) -} - -func (ms *merkleState) write(updateValidators bool, height uint64) error { - currentData, weightDiffs, blsKeyDiffs, valSetDiff, err := ms.processCurrentStakers() - if err != nil { - return err - } - pendingData, err := ms.processPendingStakers() - if err != nil { - return err - } - - return utils.Err( - ms.writeMerkleState(currentData, pendingData), - ms.writeBlocks(), - ms.writeTxs(), - ms.writeLocalUptimes(), - ms.writeWeightDiffs(height, weightDiffs), - ms.writeBlsKeyDiffs(height, blsKeyDiffs), - ms.writeRewardUTXOs(), - ms.updateValidatorSet(updateValidators, valSetDiff, weightDiffs), - ) -} - -func (ms *merkleState) processCurrentStakers() ( - map[ids.ID]*stakersData, - map[weightDiffKey]*ValidatorWeightDiff, - map[ids.NodeID]*bls.PublicKey, - map[weightDiffKey]*diffValidator, - error, -) { - var ( - outputStakers = make(map[ids.ID]*stakersData) - outputWeights = make(map[weightDiffKey]*ValidatorWeightDiff) - outputBlsKey = make(map[ids.NodeID]*bls.PublicKey) - outputValSet = make(map[weightDiffKey]*diffValidator) - ) - - for subnetID, subnetValidatorDiffs := range ms.currentStakers.validatorDiffs { - delete(ms.currentStakers.validatorDiffs, subnetID) - for nodeID, validatorDiff := range subnetValidatorDiffs { - weightKey := weightDiffKey{ - subnetID: subnetID, - 
nodeID: nodeID, - } - outputValSet[weightKey] = validatorDiff - - // make sure there is an entry for delegators even when - // no validators were modified. - outputWeights[weightKey] = &ValidatorWeightDiff{ - Decrease: validatorDiff.validatorStatus == deleted, - } - - switch validatorDiff.validatorStatus { - case added: - var ( - txID = validatorDiff.validator.TxID - potentialReward = validatorDiff.validator.PotentialReward - weight = validatorDiff.validator.Weight - blsKey = validatorDiff.validator.PublicKey - ) - tx, _, err := ms.GetTx(txID) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed loading current validator tx, %w", err) - } - - outputStakers[txID] = &stakersData{ - TxBytes: tx.Bytes(), - PotentialReward: potentialReward, - } - outputWeights[weightKey].Amount = weight - - if blsKey != nil { - // Record that the public key for the validator is being - // added. This means the prior value for the public key was - // nil. - outputBlsKey[nodeID] = nil - } - - case deleted: - var ( - txID = validatorDiff.validator.TxID - weight = validatorDiff.validator.Weight - blsKey = validatorDiff.validator.PublicKey - ) - - outputStakers[txID] = &stakersData{ - TxBytes: nil, - } - outputWeights[weightKey].Amount = weight - - if blsKey != nil { - // Record that the public key for the validator is being - // removed. This means we must record the prior value of the - // public key. - outputBlsKey[nodeID] = blsKey - } - } - - addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) - defer addedDelegatorIterator.Release() - for addedDelegatorIterator.Next() { - staker := addedDelegatorIterator.Value() - tx, _, err := ms.GetTx(staker.TxID) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed loading current delegator tx, %w", err) - } - - outputStakers[staker.TxID] = &stakersData{ - TxBytes: tx.Bytes(), - PotentialReward: staker.PotentialReward, - } - if err := outputWeights[weightKey].Add(false, staker.Weight); err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to increase node weight diff: %w", err) - } - } - - for _, staker := range validatorDiff.deletedDelegators { - txID := staker.TxID - - outputStakers[txID] = &stakersData{ - TxBytes: nil, - } - if err := outputWeights[weightKey].Add(true, staker.Weight); err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to decrease node weight diff: %w", err) - } - } - } - } - return outputStakers, outputWeights, outputBlsKey, outputValSet, nil -} - -func (ms *merkleState) processPendingStakers() (map[ids.ID]*stakersData, error) { - output := make(map[ids.ID]*stakersData) - for subnetID, subnetValidatorDiffs := range ms.pendingStakers.validatorDiffs { - delete(ms.pendingStakers.validatorDiffs, subnetID) - for _, validatorDiff := range subnetValidatorDiffs { - // validatorDiff.validator is not guaranteed to be non-nil here. 
- // Access it only if validatorDiff.validatorStatus is added or deleted - switch validatorDiff.validatorStatus { - case added: - txID := validatorDiff.validator.TxID - tx, _, err := ms.GetTx(txID) - if err != nil { - return nil, fmt.Errorf("failed loading pending validator tx, %w", err) - } - output[txID] = &stakersData{ - TxBytes: tx.Bytes(), - PotentialReward: 0, - } - case deleted: - txID := validatorDiff.validator.TxID - output[txID] = &stakersData{ - TxBytes: nil, - } - } - - addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) - defer addedDelegatorIterator.Release() - for addedDelegatorIterator.Next() { - staker := addedDelegatorIterator.Value() - tx, _, err := ms.GetTx(staker.TxID) - if err != nil { - return nil, fmt.Errorf("failed loading pending delegator tx, %w", err) - } - output[staker.TxID] = &stakersData{ - TxBytes: tx.Bytes(), - PotentialReward: 0, - } - } - - for _, staker := range validatorDiff.deletedDelegators { - txID := staker.TxID - output[txID] = &stakersData{ - TxBytes: nil, - } - } - } - } - return output, nil -} - -func (ms *merkleState) writeMerkleState(currentData, pendingData map[ids.ID]*stakersData) error { - batchOps := make([]database.BatchOp, 0) - err := utils.Err( - ms.writeMetadata(&batchOps), - ms.writePermissionedSubnets(&batchOps), - ms.writeSubnetOwners(&batchOps), - ms.writeElasticSubnets(&batchOps), - ms.writeChains(&batchOps), - ms.writeCurrentStakers(&batchOps, currentData), - ms.writePendingStakers(&batchOps, pendingData), - ms.writeDelegateeRewards(&batchOps), - ms.writeUTXOs(&batchOps), - ) - if err != nil { - return err - } - - if len(batchOps) == 0 { - // nothing to commit - return nil - } - - view, err := ms.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{BatchOps: batchOps}) - if err != nil { - return fmt.Errorf("failed creating merkleDB view: %w", err) - } - if err := view.CommitToDB(context.TODO()); err != nil { - return fmt.Errorf("failed committing merkleDB view: %w", err) - } - return ms.logMerkleRoot(len(batchOps) != 0) -} - -func (ms *merkleState) writeMetadata(batchOps *[]database.BatchOp) error { - if !ms.chainTime.Equal(ms.latestComittedChainTime) { - encodedChainTime, err := ms.chainTime.MarshalBinary() - if err != nil { - return fmt.Errorf("failed to encode chainTime: %w", err) - } - - *batchOps = append(*batchOps, database.BatchOp{ - Key: merkleChainTimeKey, - Value: encodedChainTime, - }) - ms.latestComittedChainTime = ms.chainTime - } - - if ms.lastAcceptedBlkID != ms.latestCommittedLastAcceptedBlkID { - *batchOps = append(*batchOps, database.BatchOp{ - Key: merkleLastAcceptedBlkIDKey, - Value: ms.lastAcceptedBlkID[:], - }) - ms.latestCommittedLastAcceptedBlkID = ms.lastAcceptedBlkID - } - - // lastAcceptedBlockHeight not persisted yet in merkleDB state. - // TODO: Consider if it should be
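// [Editor's aside, not part of the original patch] Shape of the entry the
// supplies loop below stages, assuming the merkle section prefixes introduced
// in state.go by this patch (merkleSuppliesPrefix = []byte{metadataSectionPrefix,
// 0x02}); the exact key composition lives in merkleSuppliesKey, outside this hunk:
//
//	// key   = merkleSuppliesKey(subnetID)   ~ {0x00, 0x02} ++ subnetID[:]
//	// value = database.PackUInt64(supply)   // fixed-width 8-byte integer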
- - for subnetID, supply := range ms.modifiedSupplies { - supply := supply - delete(ms.modifiedSupplies, subnetID) // clear up ms.modifiedSupplies to avoid potential double commits - ms.suppliesCache.Put(subnetID, &supply) - - key := merkleSuppliesKey(subnetID) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: database.PackUInt64(supply), - }) - } - return nil -} - -func (ms *merkleState) writePermissionedSubnets(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for _, subnetTx := range ms.addedPermissionedSubnets { - key := merklePermissionedSubnetKey(subnetTx.ID()) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: subnetTx.Bytes(), - }) - } - ms.addedPermissionedSubnets = make([]*txs.Tx, 0) - return nil -} - -func (ms *merkleState) writeSubnetOwners(batchOps *[]database.BatchOp) error { - for subnetID, owner := range ms.subnetOwners { - owner := owner - - ownerBytes, err := block.GenesisCodec.Marshal(block.Version, &owner) - if err != nil { - return fmt.Errorf("failed to marshal subnet owner: %w", err) - } - - ms.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ - owner: owner, - size: len(ownerBytes), - }) - - key := merkleSubnetOwnersKey(subnetID) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: ownerBytes, - }) - } - maps.Clear(ms.subnetOwners) - return nil -} - -func (ms *merkleState) writeElasticSubnets(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for subnetID, transformSubnetTx := range ms.addedElasticSubnets { - key := merkleElasticSubnetKey(subnetID) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: transformSubnetTx.Bytes(), - }) - delete(ms.addedElasticSubnets, subnetID) - - // Note: Evict is used rather than Put here because tx may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. 
- ms.elasticSubnetCache.Evict(subnetID) - } - return nil -} - -func (ms *merkleState) writeChains(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for subnetID, chains := range ms.addedChains { - for _, chainTx := range chains { - key := merkleChainKey(subnetID, chainTx.ID()) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: chainTx.Bytes(), - }) - } - delete(ms.addedChains, subnetID) - } - return nil -} - -func (*merkleState) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ids.ID]*stakersData) error { - for stakerTxID, data := range currentData { - key := merkleCurrentStakersKey(stakerTxID) - - if data.TxBytes == nil { - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Delete: true, - }) - continue - } - - dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) - if err != nil { - return fmt.Errorf("failed to serialize current stakers data, stakerTxID %v: %w", stakerTxID, err) - } - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: dataBytes, - }) - } - return nil -} - -func (*merkleState) writePendingStakers(batchOps *[]database.BatchOp, pendingData map[ids.ID]*stakersData) error { - for stakerTxID, data := range pendingData { - key := merklePendingStakersKey(stakerTxID) - - if data.TxBytes == nil { - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Delete: true, - }) - continue - } - - dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) - if err != nil { - return fmt.Errorf("failed to serialize pending stakers data, stakerTxID %v: %w", stakerTxID, err) - } - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: dataBytes, - }) - } - return nil -} - -func (ms *merkleState) writeUTXOs(batchOps *[]database.BatchOp) error { - for utxoID, utxo := range ms.modifiedUTXOs { - delete(ms.modifiedUTXOs, utxoID) - key := merkleUtxoIDKey(utxoID) - if utxo == nil { // delete the UTXO - switch utxo, err := ms.GetUTXO(utxoID); err { - case nil: - ms.utxoCache.Put(utxoID, nil) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Delete: true, - }) - // remove the index - if err := ms.writeUTXOsIndex(utxo, false /*insertUtxo*/); err != nil { - return err - } - // go process next utxo - continue - - case database.ErrNotFound: - // trying to delete a non-existing utxo. 
- continue - - default: - return err - } - } - - // insert the UTXO - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) - if err != nil { - return err - } - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: utxoBytes, - }) - - // store the index - if err := ms.writeUTXOsIndex(utxo, true /*insertUtxo*/); err != nil { - return err - } - } - return nil -} - -func (ms *merkleState) writeDelegateeRewards(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for nodeID, nodeDelegateeRewards := range ms.modifiedDelegateeReward { - nodeDelegateeRewardsList := nodeDelegateeRewards.List() - for _, subnetID := range nodeDelegateeRewardsList { - delegateeReward := ms.delegateeRewardCache[nodeID][subnetID] - - key := merkleDelegateeRewardsKey(nodeID, subnetID) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: database.PackUInt64(delegateeReward), - }) - } - delete(ms.modifiedDelegateeReward, nodeID) - } - return nil -} - -func (ms *merkleState) writeBlocks() error { - for blkID, blk := range ms.addedBlocks { - var ( - blkID = blkID - blkHeight = blk.Height() - ) - - delete(ms.addedBlockIDs, blkHeight) - ms.blockIDCache.Put(blkHeight, blkID) - if err := database.PutID(ms.blockIDDB, database.PackUInt64(blkHeight), blkID); err != nil { - return fmt.Errorf("failed to write block height index: %w", err) - } - - delete(ms.addedBlocks, blkID) - // Note: Evict is used rather than Put here because blk may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. - ms.blockCache.Evict(blkID) - - if err := ms.blockDB.Put(blkID[:], blk.Bytes()); err != nil { - return fmt.Errorf("failed to write block %s: %w", blkID, err) - } - } - return nil -} - -func (ms *merkleState) writeTxs() error { - for txID, txStatus := range ms.addedTxs { - txID := txID - - stx := txBytesAndStatus{ - Tx: txStatus.tx.Bytes(), - Status: txStatus.status, - } - - // Note that we're serializing a [txBytesAndStatus] here, not a - // *txs.Tx, so we don't use [txs.Codec]. - txBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stx) - if err != nil { - return fmt.Errorf("failed to serialize tx: %w", err) - } - - delete(ms.addedTxs, txID) - // Note: Evict is used rather than Put here because stx may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. 
- ms.txCache.Evict(txID) - if err := ms.txDB.Put(txID[:], txBytes); err != nil { - return fmt.Errorf("failed to add tx: %w", err) - } - } - return nil -} - -func (ms *merkleState) writeUTXOsIndex(utxo *avax.UTXO, insertUtxo bool) error { - addressable, ok := utxo.Out.(avax.Addressable) - if !ok { - return nil - } - addresses := addressable.Addresses() - - for _, addr := range addresses { - key := merkleUtxoIndexKey(addr, utxo.InputID()) - - if insertUtxo { - if err := ms.indexedUTXOsDB.Put(key, nil); err != nil { - return err - } - } else { - if err := ms.indexedUTXOsDB.Delete(key); err != nil { - return err - } - } - } - return nil -} - -func (ms *merkleState) writeLocalUptimes() error { - for vdrID, updatedSubnets := range ms.modifiedLocalUptimes { - for subnetID := range updatedSubnets { - key := merkleLocalUptimesKey(vdrID, subnetID) - - uptimes := ms.localUptimesCache[vdrID][subnetID] - uptimeBytes, err := txs.GenesisCodec.Marshal(txs.Version, uptimes) - if err != nil { - return err - } - - if err := ms.localUptimesDB.Put(key, uptimeBytes); err != nil { - return fmt.Errorf("failed to add local uptimes: %w", err) - } - } - delete(ms.modifiedLocalUptimes, vdrID) - } - return nil -} - -func (ms *merkleState) writeWeightDiffs(height uint64, weightDiffs map[weightDiffKey]*ValidatorWeightDiff) error { - for weightKey, weightDiff := range weightDiffs { - if weightDiff.Amount == 0 { - // No weight change to record; go to next validator. - continue - } - - key := marshalDiffKey(weightKey.subnetID, height, weightKey.nodeID) - weightDiffBytes := marshalWeightDiff(weightDiff) - if err := ms.flatValidatorWeightDiffsDB.Put(key, weightDiffBytes); err != nil { - return fmt.Errorf("failed to add weight diffs: %w", err) - } - } - return nil -} - -func (ms *merkleState) writeBlsKeyDiffs(height uint64, blsKeyDiffs map[ids.NodeID]*bls.PublicKey) error { - for nodeID, blsKey := range blsKeyDiffs { - key := marshalDiffKey(constants.PrimaryNetworkID, height, nodeID) - blsKeyBytes := []byte{} - if blsKey != nil { - // Note: We store the uncompressed public key here as it is - // significantly more efficient to parse when applying - // diffs. - blsKeyBytes = blsKey.Serialize() - } - if err := ms.flatValidatorPublicKeyDiffsDB.Put(key, blsKeyBytes); err != nil { - return fmt.Errorf("failed to add bls key diffs: %w", err) - } - } - return nil -} - -func (ms *merkleState) writeRewardUTXOs() error { - for txID, utxos := range ms.addedRewardUTXOs { - delete(ms.addedRewardUTXOs, txID) - ms.rewardUTXOsCache.Put(txID, utxos) - rawTxDB := prefixdb.New(txID[:], ms.rewardUTXOsDB) - txDB := linkeddb.NewDefault(rawTxDB) - - for _, utxo := range utxos { - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) - if err != nil { - return fmt.Errorf("failed to serialize reward UTXO: %w", err) - } - utxoID := utxo.InputID() - if err := txDB.Put(utxoID[:], utxoBytes); err != nil { - return fmt.Errorf("failed to add reward UTXO: %w", err) - } - } - } - return nil -} - -func (ms *merkleState) updateValidatorSet( - updateValidators bool, - valSetDiff map[weightDiffKey]*diffValidator, - weightDiffs map[weightDiffKey]*ValidatorWeightDiff, -) error { - if !updateValidators { - return nil - } - - for weightKey, weightDiff := range weightDiffs { - var ( - subnetID = weightKey.subnetID - nodeID = weightKey.nodeID - validatorDiff = valSetDiff[weightKey] - err error - ) - - if weightDiff.Amount == 0 { - // No weight change to record; go to next validator. 
- continue - } - - if weightDiff.Decrease { - err = ms.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) - } else { - if validatorDiff.validatorStatus == added { - staker := validatorDiff.validator - err = ms.validators.AddStaker( - subnetID, - nodeID, - staker.PublicKey, - staker.TxID, - weightDiff.Amount, - ) - } else { - err = ms.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) - } - } - if err != nil { - return fmt.Errorf("failed to update validator weight: %w", err) - } - } - - ms.metrics.SetLocalStake(ms.validators.GetWeight(constants.PrimaryNetworkID, ms.ctx.NodeID)) - totalWeight, err := ms.validators.TotalWeight(constants.PrimaryNetworkID) - if err != nil { - return fmt.Errorf("failed to get total weight: %w", err) - } - ms.metrics.SetTotalStake(totalWeight) - return nil -} - -func (ms *merkleState) logMerkleRoot(hasChanges bool) error { - // get current Height - blk, err := ms.GetStatelessBlock(ms.GetLastAccepted()) - if err != nil { - // may happen in tests. Let's just skip - return nil - } - - if !hasChanges { - ms.ctx.Log.Info("merkle root", - zap.Uint64("height", blk.Height()), - zap.Stringer("blkID", blk.ID()), - zap.String("merkle root", "no changes to merkle state"), - ) - return nil - } - - view, err := ms.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{}) - if err != nil { - return fmt.Errorf("failed creating merkleDB view: %w", err) - } - root, err := view.GetMerkleRoot(context.TODO()) - if err != nil { - return fmt.Errorf("failed pulling merkle root: %w", err) - } - - ms.ctx.Log.Info("merkle root", - zap.Uint64("height", blk.Height()), - zap.Stringer("blkID", blk.ID()), - zap.String("merkle root", root.String()), - ) - return nil -} diff --git a/vms/platformvm/state/metadata_delegator.go b/vms/platformvm/state/metadata_delegator.go deleted file mode 100644 index 04e7ef6a8795..000000000000 --- a/vms/platformvm/state/metadata_delegator.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package state - -import ( - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" -) - -type delegatorMetadata struct { - PotentialReward uint64 - - txID ids.ID -} - -func parseDelegatorMetadata(bytes []byte, metadata *delegatorMetadata) error { - var err error - metadata.PotentialReward, err = database.ParseUInt64(bytes) - return err -} - -func writeDelegatorMetadata(db database.KeyValueWriter, metadata *delegatorMetadata) error { - return database.PutUInt64(db, metadata.txID[:], metadata.PotentialReward) -} diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 199b245008f7..8b2c4066ddd2 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -4,18 +4,17 @@ package state import ( + "bytes" "context" "errors" "fmt" - "math" "sync" "time" - "github.com/google/btree" - - "go.uber.org/zap" - "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" @@ -25,71 +24,35 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/x/merkledb" safemath "github.com/ava-labs/avalanchego/utils/math" ) const ( - pruneCommitLimit = 1024 - pruneCommitSleepMultiplier = 5 - pruneCommitSleepCap = 10 * time.Second - pruneUpdateFrequency = 30 * time.Second -) - -var ( - _ State = (*state)(nil) + HistoryLength = uint(256) - errValidatorSetAlreadyPopulated = errors.New("validator set already populated") - errIsNotSubnet = errors.New("is not a subnet") - - blockIDPrefix = []byte("blockID") - blockPrefix = []byte("block") - validatorsPrefix = []byte("validators") - currentPrefix = []byte("current") - pendingPrefix = []byte("pending") - validatorPrefix = []byte("validator") - delegatorPrefix = []byte("delegator") - subnetValidatorPrefix = []byte("subnetValidator") - subnetDelegatorPrefix = []byte("subnetDelegator") - nestedValidatorWeightDiffsPrefix = []byte("validatorDiffs") - nestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") - flatValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") - flatValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") - txPrefix = []byte("tx") - rewardUTXOsPrefix = []byte("rewardUTXOs") - utxoPrefix = []byte("utxo") - 
subnetPrefix = []byte("subnet") - subnetOwnerPrefix = []byte("subnetOwner") - transformedSubnetPrefix = []byte("transformedSubnet") - supplyPrefix = []byte("supply") - chainPrefix = []byte("chain") - singletonPrefix = []byte("singleton") - - timestampKey = []byte("timestamp") - currentSupplyKey = []byte("current supply") - lastAcceptedKey = []byte("last accepted") - heightsIndexedKey = []byte("heights indexed") - initializedKey = []byte("initialized") - prunedKey = []byte("pruned") + valueNodeCacheSize = 512 * units.MiB + intermediateNodeCacheSize = 512 * units.MiB + utxoCacheSize = 8192 // from avax/utxo_state.go ) // Chain collects all methods to manage the state of the chain for block @@ -206,249 +169,43 @@ type State interface { Close() error } -// TODO: Remove after v1.11.x is activated -type stateBlk struct { - Blk block.Block - Bytes []byte `serialize:"true"` - Status choices.Status `serialize:"true"` -} - -/* - * VMDB - * |-. validators - * | |-. current - * | | |-. validator - * | | | '-. list - * | | | '-- txID -> uptime + potential reward + potential delegatee reward - * | | |-. delegator - * | | | '-. list - * | | | '-- txID -> potential reward - * | | |-. subnetValidator - * | | | '-. list - * | | | '-- txID -> uptime + potential reward + potential delegatee reward - * | | '-. subnetDelegator - * | | '-. list - * | | '-- txID -> potential reward - * | |-. pending - * | | |-. validator - * | | | '-. list - * | | | '-- txID -> nil - * | | |-. delegator - * | | | '-. list - * | | | '-- txID -> nil - * | | |-. subnetValidator - * | | | '-. list - * | | | '-- txID -> nil - * | | '-. subnetDelegator - * | | '-. list - * | | '-- txID -> nil - * | |-. nested weight diffs TODO: Remove once only the flat db is needed - * | | '-. height+subnet - * | | '-. list - * | | '-- nodeID -> weightChange - * | |-. nested pub key diffs TODO: Remove once only the flat db is needed - * | | '-. height - * | | '-. list - * | | '-- nodeID -> compressed public key - * | |-. flat weight diffs - * | | '-- subnet+height+nodeID -> weightChange - * | '-. flat pub key diffs - * | '-- subnet+height+nodeID -> uncompressed public key or nil - * |-. blockIDs - * | '-- height -> blockID - * |-. blocks - * | '-- blockID -> block bytes - * |-. txs - * | '-- txID -> tx bytes + tx status - * |- rewardUTXOs - * | '-. txID - * | '-. list - * | '-- utxoID -> utxo bytes - * |- utxos - * | '-- utxoDB - * |-. subnets - * | '-. list - * | '-- txID -> nil - * |-. subnetOwners - * | '-. subnetID -> owner - * |-. chains - * | '-. subnetID - * | '-. list - * | '-- txID -> nil - * '-. singletons - * |-- initializedKey -> nil - * |-- prunedKey -> nil - * |-- timestampKey -> timestamp - * |-- currentSupplyKey -> currentSupply - * |-- lastAcceptedKey -> lastAccepted - * '-- heightsIndexKey -> startIndexHeight + endIndexHeight - */ -type state struct { - validatorState - - validators validators.Manager - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator - - baseDB *versiondb.Database - - currentStakers *baseStakers - pendingStakers *baseStakers - - currentHeight uint64 - - addedBlockIDs map[uint64]ids.ID // map of height -> blockID - blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database - blockIDDB database.Database - - addedBlocks map[ids.ID]block.Block // map of blockID -> Block - blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. 
If the entry is nil, it is not in the database - blockDB database.Database - - validatorsDB database.Database - currentValidatorsDB database.Database - currentValidatorBaseDB database.Database - currentValidatorList linkeddb.LinkedDB - currentDelegatorBaseDB database.Database - currentDelegatorList linkeddb.LinkedDB - currentSubnetValidatorBaseDB database.Database - currentSubnetValidatorList linkeddb.LinkedDB - currentSubnetDelegatorBaseDB database.Database - currentSubnetDelegatorList linkeddb.LinkedDB - pendingValidatorsDB database.Database - pendingValidatorBaseDB database.Database - pendingValidatorList linkeddb.LinkedDB - pendingDelegatorBaseDB database.Database - pendingDelegatorList linkeddb.LinkedDB - pendingSubnetValidatorBaseDB database.Database - pendingSubnetValidatorList linkeddb.LinkedDB - pendingSubnetDelegatorBaseDB database.Database - pendingSubnetDelegatorList linkeddb.LinkedDB - - nestedValidatorWeightDiffsDB database.Database - nestedValidatorPublicKeyDiffsDB database.Database - flatValidatorWeightDiffsDB database.Database - flatValidatorPublicKeyDiffsDB database.Database - - addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} - txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. If the entry is nil, it isn't in the database - txDB database.Database - - addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO - rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO - rewardUTXODB database.Database - - modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed - utxoDB database.Database - utxoState avax.UTXOState - - cachedSubnets []*txs.Tx // nil if the subnets haven't been loaded - addedSubnets []*txs.Tx - subnetBaseDB database.Database - subnetDB linkeddb.LinkedDB - - // Subnet ID --> Owner of the subnet - subnetOwners map[ids.ID]fx.Owner - subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database - subnetOwnerDB database.Database - - transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx - transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database - transformedSubnetDB database.Database - - modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply - supplyCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database - supplyDB database.Database - - addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet - chainCache cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx - chainDBCache cache.Cacher[ids.ID, linkeddb.LinkedDB] // cache of subnetID -> linkedDB - chainDB database.Database - - // The persisted fields represent the current database value - timestamp, persistedTimestamp time.Time - currentSupply, persistedCurrentSupply uint64 - // [lastAccepted] is the most recently accepted block. - lastAccepted, persistedLastAccepted ids.ID - indexedHeights *heightRange - singletonDB database.Database -} - -// heightRange is used to track which heights are safe to use the native DB -// iterator for querying validator diffs. -// -// TODO: Remove once we are guaranteed nodes can not rollback to not support the -// new indexing mechanism. 
-type heightRange struct { - LowerBound uint64 `serialize:"true"` - UpperBound uint64 `serialize:"true"` -} - -type ValidatorWeightDiff struct { - Decrease bool `serialize:"true"` - Amount uint64 `serialize:"true"` -} - -func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { - if v.Decrease == negative { - var err error - v.Amount, err = safemath.Add64(v.Amount, amount) - return err - } - - if v.Amount > amount { - v.Amount -= amount - } else { - v.Amount = safemath.AbsDiff(v.Amount, amount) - v.Decrease = negative - } - return nil -} - -type heightWithSubnet struct { - Height uint64 `serialize:"true"` - SubnetID ids.ID `serialize:"true"` -} - -type txBytesAndStatus struct { - Tx []byte `serialize:"true"` - Status status.Status `serialize:"true"` -} - -type txAndStatus struct { - tx *txs.Tx - status status.Status -} - -type fxOwnerAndSize struct { - owner fx.Owner - size int -} - -func txSize(_ ids.ID, tx *txs.Tx) int { - if tx == nil { - return ids.IDLen + constants.PointerOverhead - } - return ids.IDLen + len(tx.Bytes()) + constants.PointerOverhead -} +var ( + _ State = (*state)(nil) -func txAndStatusSize(_ ids.ID, t *txAndStatus) int { - if t == nil { - return ids.IDLen + constants.PointerOverhead - } - return ids.IDLen + len(t.tx.Bytes()) + wrappers.IntLen + 2*constants.PointerOverhead -} + errValidatorSetAlreadyPopulated = errors.New("validator set already populated") + errIsNotSubnet = errors.New("is not a subnet") -func blockSize(_ ids.ID, blk block.Block) int { - if blk == nil { - return ids.IDLen + constants.PointerOverhead - } - return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead -} + merkleStatePrefix = []byte{0x00} + merkleSingletonPrefix = []byte{0x01} + merkleBlockPrefix = []byte{0x02} + merkleBlockIDsPrefix = []byte{0x03} + merkleTxPrefix = []byte{0x04} + merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) + merkleUptimesPrefix = []byte{0x06} // locally measured uptimes + merkleWeightDiffPrefix = []byte{0x07} // non-merkleized validators weight diff. TODO: should we merkleize them? + merkleBlsKeyDiffPrefix = []byte{0x08} + merkleRewardUtxosPrefix = []byte{0x09} + + initializedKey = []byte("initialized") + + // merkle db sections + metadataSectionPrefix = byte(0x00) + merkleChainTimeKey = []byte{metadataSectionPrefix, 0x00} + merkleLastAcceptedBlkIDKey = []byte{metadataSectionPrefix, 0x01} + merkleSuppliesPrefix = []byte{metadataSectionPrefix, 0x02} + + permissionedSubnetSectionPrefix = []byte{0x01} + elasticSubnetSectionPrefix = []byte{0x02} + chainsSectionPrefix = []byte{0x03} + utxosSectionPrefix = []byte{0x04} + currentStakersSectionPrefix = []byte{0x05} + pendingStakersSectionPrefix = []byte{0x06} + delegateeRewardsPrefix = []byte{0x07} + subnetOwnersPrefix = []byte{0x08} +) func New( - db database.Database, + rawDB database.Database, genesisBytes []byte, metricsReg prometheus.Registerer, validators validators.Manager, @@ -457,8 +214,8 @@ func New( metrics metrics.Metrics, rewards reward.Calculator, ) (State, error) { - s, err := newState( - db, + res, err := newState( + rawDB, metrics, validators, execCfg, @@ -470,39 +227,17 @@ func New( return nil, err } - if err := s.sync(genesisBytes); err != nil { + if err := res.sync(genesisBytes); err != nil { // Drop any errors on close to return the first error - _ = s.Close() - - return nil, err - } - - // Before we start accepting new blocks, we check if the pruning process needs - // to be run. 
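// [Editor's aside, not part of the original patch] Worked example of the
// ValidatorWeightDiff.Add semantics removed above (the type is still used by
// the merkle state to net out weight changes): Decrease stores the sign and
// Amount the magnitude, so opposite-signed updates cancel.
//
//	d := ValidatorWeightDiff{}   // net zero
//	_ = d.Add(false, 100)        // +100 -> {Decrease: false, Amount: 100}
//	_ = d.Add(true, 30)          // -30  -> {Decrease: false, Amount: 70}
//	_ = d.Add(true, 100)         // -100 -> {Decrease: true,  Amount: 30}
//
// This is how processCurrentStakers folds a validator's added and removed
// delegators into a single weight change per (subnetID, nodeID).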
- // - // TODO: Cleanup after v1.11.x is activated - shouldPrune, err := s.ShouldPrune() - if err != nil { + _ = res.Close() return nil, err } - if shouldPrune { - // If the pruned key is on disk, we must delete it to ensure our disk - // can't get into a partially pruned state if the node restarts mid-way - // through pruning. - if err := s.singletonDB.Delete(prunedKey); err != nil { - return nil, fmt.Errorf("failed to remove prunedKey from singletonDB: %w", err) - } - - if err := s.Commit(); err != nil { - return nil, fmt.Errorf("failed to commit to baseDB: %w", err) - } - } - return s, nil + return res, nil } func newState( - db database.Database, + rawDB database.Database, metrics metrics.Metrics, validators validators.Manager, execCfg *config.ExecutionConfig, @@ -510,55 +245,37 @@ func newState( metricsReg prometheus.Registerer, rewards reward.Calculator, ) (*state, error) { - blockIDCache, err := metercacher.New[uint64, ids.ID]( - "block_id_cache", - metricsReg, - &cache.LRU[uint64, ids.ID]{Size: execCfg.BlockIDCacheSize}, + var ( + baseDB = versiondb.New(rawDB) + baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) + singletonDB = prefixdb.New(merkleSingletonPrefix, baseDB) + blockDB = prefixdb.New(merkleBlockPrefix, baseDB) + blockIDsDB = prefixdb.New(merkleBlockIDsPrefix, baseDB) + txDB = prefixdb.New(merkleTxPrefix, baseDB) + indexedUTXOsDB = prefixdb.New(merkleIndexUTXOsPrefix, baseDB) + localUptimesDB = prefixdb.New(merkleUptimesPrefix, baseDB) + flatValidatorWeightDiffsDB = prefixdb.New(merkleWeightDiffPrefix, baseDB) + flatValidatorPublicKeyDiffsDB = prefixdb.New(merkleBlsKeyDiffPrefix, baseDB) + rewardUTXOsDB = prefixdb.New(merkleRewardUtxosPrefix, baseDB) ) - if err != nil { - return nil, err - } - blockCache, err := metercacher.New[ids.ID, block.Block]( - "block_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, block.Block](execCfg.BlockCacheSize, blockSize), - ) + noOpTracer, err := trace.New(trace.Config{Enabled: false}) if err != nil { - return nil, err + return nil, fmt.Errorf("failed creating noOpTraces: %w", err) } - baseDB := versiondb.New(db) - - validatorsDB := prefixdb.New(validatorsPrefix, baseDB) - - currentValidatorsDB := prefixdb.New(currentPrefix, validatorsDB) - currentValidatorBaseDB := prefixdb.New(validatorPrefix, currentValidatorsDB) - currentDelegatorBaseDB := prefixdb.New(delegatorPrefix, currentValidatorsDB) - currentSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, currentValidatorsDB) - currentSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, currentValidatorsDB) - - pendingValidatorsDB := prefixdb.New(pendingPrefix, validatorsDB) - pendingValidatorBaseDB := prefixdb.New(validatorPrefix, pendingValidatorsDB) - pendingDelegatorBaseDB := prefixdb.New(delegatorPrefix, pendingValidatorsDB) - pendingSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, pendingValidatorsDB) - pendingSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, pendingValidatorsDB) - - nestedValidatorWeightDiffsDB := prefixdb.New(nestedValidatorWeightDiffsPrefix, validatorsDB) - nestedValidatorPublicKeyDiffsDB := prefixdb.New(nestedValidatorPublicKeyDiffsPrefix, validatorsDB) - flatValidatorWeightDiffsDB := prefixdb.New(flatValidatorWeightDiffsPrefix, validatorsDB) - flatValidatorPublicKeyDiffsDB := prefixdb.New(flatValidatorPublicKeyDiffsPrefix, validatorsDB) - - txCache, err := metercacher.New( - "tx_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize), - ) + merkleDB, err := 
merkledb.New(context.TODO(), baseMerkleDB, merkledb.Config{ + BranchFactor: merkledb.BranchFactor16, + HistoryLength: HistoryLength, + ValueNodeCacheSize: valueNodeCacheSize, + IntermediateNodeCacheSize: intermediateNodeCacheSize, + Reg: prometheus.NewRegistry(), + Tracer: noOpTracer, + }) if err != nil { - return nil, err + return nil, fmt.Errorf("failed creating merkleDB: %w", err) } - rewardUTXODB := prefixdb.New(rewardUTXOsPrefix, baseDB) rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( "reward_utxos_cache", metricsReg, @@ -568,15 +285,15 @@ func newState( return nil, err } - utxoDB := prefixdb.New(utxoPrefix, baseDB) - utxoState, err := avax.NewMeteredUTXOState(utxoDB, txs.GenesisCodec, metricsReg, execCfg.ChecksumsEnabled) + suppliesCache, err := metercacher.New[ids.ID, *uint64]( + "supply_cache", + metricsReg, + &cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize}, + ) if err != nil { return nil, err } - subnetBaseDB := prefixdb.New(subnetPrefix, baseDB) - - subnetOwnerDB := prefixdb.New(subnetOwnerPrefix, baseDB) subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize]( "subnet_owner_cache", metricsReg, @@ -597,119 +314,223 @@ func newState( return nil, err } - supplyCache, err := metercacher.New[ids.ID, *uint64]( - "supply_cache", + chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( + "chain_cache", metricsReg, - &cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize}, + &cache.LRU[ids.ID, []*txs.Tx]{Size: execCfg.ChainCacheSize}, ) if err != nil { return nil, err } - chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( - "chain_cache", + blockCache, err := metercacher.New[ids.ID, block.Block]( + "block_cache", metricsReg, - &cache.LRU[ids.ID, []*txs.Tx]{Size: execCfg.ChainCacheSize}, + cache.NewSizedLRU[ids.ID, block.Block](execCfg.BlockCacheSize, blockSize), ) if err != nil { return nil, err } - chainDBCache, err := metercacher.New[ids.ID, linkeddb.LinkedDB]( - "chain_db_cache", + blockIDCache, err := metercacher.New[uint64, ids.ID]( + "block_id_cache", metricsReg, - &cache.LRU[ids.ID, linkeddb.LinkedDB]{Size: execCfg.ChainDBCacheSize}, + &cache.LRU[uint64, ids.ID]{Size: execCfg.BlockIDCacheSize}, ) if err != nil { return nil, err } - return &state{ - validatorState: newValidatorState(), + txCache, err := metercacher.New( + "tx_cache", + metricsReg, + cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize), + ) + if err != nil { + return nil, err + } + return &state{ validators: validators, ctx: ctx, metrics: metrics, rewards: rewards, - baseDB: baseDB, - - addedBlockIDs: make(map[uint64]ids.ID), - blockIDCache: blockIDCache, - blockIDDB: prefixdb.New(blockIDPrefix, baseDB), - addedBlocks: make(map[ids.ID]block.Block), - blockCache: blockCache, - blockDB: prefixdb.New(blockPrefix, baseDB), + baseDB: baseDB, + singletonDB: singletonDB, + baseMerkleDB: baseMerkleDB, + merkleDB: merkleDB, currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), - validatorsDB: validatorsDB, - currentValidatorsDB: currentValidatorsDB, - currentValidatorBaseDB: currentValidatorBaseDB, - currentValidatorList: linkeddb.NewDefault(currentValidatorBaseDB), - currentDelegatorBaseDB: currentDelegatorBaseDB, - currentDelegatorList: linkeddb.NewDefault(currentDelegatorBaseDB), - currentSubnetValidatorBaseDB: currentSubnetValidatorBaseDB, - currentSubnetValidatorList: linkeddb.NewDefault(currentSubnetValidatorBaseDB), - currentSubnetDelegatorBaseDB: currentSubnetDelegatorBaseDB, - currentSubnetDelegatorList: 
linkeddb.NewDefault(currentSubnetDelegatorBaseDB), - pendingValidatorsDB: pendingValidatorsDB, - pendingValidatorBaseDB: pendingValidatorBaseDB, - pendingValidatorList: linkeddb.NewDefault(pendingValidatorBaseDB), - pendingDelegatorBaseDB: pendingDelegatorBaseDB, - pendingDelegatorList: linkeddb.NewDefault(pendingDelegatorBaseDB), - pendingSubnetValidatorBaseDB: pendingSubnetValidatorBaseDB, - pendingSubnetValidatorList: linkeddb.NewDefault(pendingSubnetValidatorBaseDB), - pendingSubnetDelegatorBaseDB: pendingSubnetDelegatorBaseDB, - pendingSubnetDelegatorList: linkeddb.NewDefault(pendingSubnetDelegatorBaseDB), - nestedValidatorWeightDiffsDB: nestedValidatorWeightDiffsDB, - nestedValidatorPublicKeyDiffsDB: nestedValidatorPublicKeyDiffsDB, - flatValidatorWeightDiffsDB: flatValidatorWeightDiffsDB, - flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, + delegateeRewardCache: make(map[ids.NodeID]map[ids.ID]uint64), + modifiedDelegateeReward: make(map[ids.NodeID]set.Set[ids.ID]), + + modifiedUTXOs: make(map[ids.ID]*avax.UTXO), + utxoCache: &cache.LRU[ids.ID, *avax.UTXO]{Size: utxoCacheSize}, + + modifiedSupplies: make(map[ids.ID]uint64), + suppliesCache: suppliesCache, + + subnetOwners: make(map[ids.ID]fx.Owner), + subnetOwnerCache: subnetOwnerCache, + + addedPermissionedSubnets: make([]*txs.Tx, 0), + permissionedSubnetCache: nil, // created first time GetSubnets is called + addedElasticSubnets: make(map[ids.ID]*txs.Tx), + elasticSubnetCache: transformedSubnetCache, + + addedChains: make(map[ids.ID][]*txs.Tx), + chainCache: chainCache, + + addedBlocks: make(map[ids.ID]block.Block), + blockCache: blockCache, + blockDB: blockDB, + + addedBlockIDs: make(map[uint64]ids.ID), + blockIDCache: blockIDCache, + blockIDDB: blockIDsDB, addedTxs: make(map[ids.ID]*txAndStatus), - txDB: prefixdb.New(txPrefix, baseDB), txCache: txCache, + txDB: txDB, + + indexedUTXOsDB: indexedUTXOsDB, + + localUptimesCache: make(map[ids.NodeID]map[ids.ID]*uptimes), + modifiedLocalUptimes: make(map[ids.NodeID]set.Set[ids.ID]), + localUptimesDB: localUptimesDB, + + flatValidatorWeightDiffsDB: flatValidatorWeightDiffsDB, + flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), - rewardUTXODB: rewardUTXODB, rewardUTXOsCache: rewardUTXOsCache, + rewardUTXOsDB: rewardUTXOsDB, + }, nil +} - modifiedUTXOs: make(map[ids.ID]*avax.UTXO), - utxoDB: utxoDB, - utxoState: utxoState, +// Stores global state in a merkle trie. This means that each state corresponds +// to a unique merkle root. Specifically, the following state is merkleized. +// - Delegatee Rewards +// - UTXOs +// - Current Supply +// - Subnet Creation Transactions +// - Subnet Owners +// - Subnet Transformation Transactions +// - Chain Creation Transactions +// - Chain time +// - Last Accepted Block ID +// - Current Staker Set +// - Pending Staker Set +// +// Changing any of the above state will cause the merkle root to change. 
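+//
+// As an illustrative sketch only (the exact prefix constants and key-building
+// helpers are defined elsewhere in this package; the names below are
+// assumptions): each merkleized section lives under its own key prefix inside
+// the shared trie, so a per-subnet entry is addressed roughly as:
+//
+//	func exampleSuppliesKey(subnetID ids.ID) []byte {
+//		key := make([]byte, 0, len(suppliesPrefix)+len(subnetID))
+//		key = append(key, suppliesPrefix...) // section prefix
+//		key = append(key, subnetID[:]...)    // per-subnet suffix
+//		return key
+//	}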
+//
+// The following state is not merkleized:
+// - Database Initialization Status
+// - Blocks
+// - Block IDs
+// - Transactions (note some transactions are also stored merkleized)
+// - Uptimes
+// - Weight Diffs
+// - BLS Key Diffs
+// - Reward UTXOs
+type state struct {
+	validators validators.Manager
+	ctx        *snow.Context
+	metrics    metrics.Metrics
+	rewards    reward.Calculator
+
+	baseDB       *versiondb.Database
+	singletonDB  database.Database
+	baseMerkleDB database.Database
+	merkleDB     merkledb.MerkleDB // Stores merkleized state
+
+	// Stakers section (delegatee rewards are tracked separately below)
+	// TODO: Consider moving delegatee rewards to the UTXOs section
+	currentStakers *baseStakers
+	pendingStakers *baseStakers
+
+	delegateeRewardCache    map[ids.NodeID]map[ids.ID]uint64
+	modifiedDelegateeReward map[ids.NodeID]set.Set[ids.ID]
+
+	// UTXOs section
+	modifiedUTXOs map[ids.ID]*avax.UTXO            // map of UTXO ID -> *UTXO
+	utxoCache     cache.Cacher[ids.ID, *avax.UTXO] // UTXO ID -> *UTXO. If the *UTXO is nil, the UTXO doesn't exist
+
+	// Metadata section
+	chainTime, latestCommittedChainTime                 time.Time
+	lastAcceptedBlkID, latestCommittedLastAcceptedBlkID ids.ID
+	lastAcceptedHeight                                  uint64 // TODO: Should this be written to state?
+	modifiedSupplies                                    map[ids.ID]uint64             // map of subnetID -> current supply
+	suppliesCache                                       cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply. If the entry is nil, it is not in the database
+
+	// Subnets section
+	// Subnet ID --> Owner of the subnet
+	subnetOwners     map[ids.ID]fx.Owner
+	subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner. If the entry is nil, it is not in the database
+
+	addedPermissionedSubnets []*txs.Tx                     // added SubnetTxs, waiting to be committed
+	permissionedSubnetCache  []*txs.Tx                     // nil if the subnets haven't been loaded
+	addedElasticSubnets      map[ids.ID]*txs.Tx            // map of subnetID -> transformSubnetTx
+	elasticSubnetCache       cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx. If the entry is nil, it is not in the database
+
+	// Chains section
+	addedChains map[ids.ID][]*txs.Tx            // maps subnetID -> the newly added chains to the subnet
+	chainCache  cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications
+
+	// Blocks section
+	// Note: addedBlocks may hold multiple entries because several blocks can
+	// be committed at once (e.g. a proposal block and its accepted option)
+	addedBlocks map[ids.ID]block.Block            // map of blockID -> Block
+	blockCache  cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database
+	blockDB     database.Database
+
+	addedBlockIDs map[uint64]ids.ID            // map of height -> blockID
+	blockIDCache  cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database
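+	// Shared caching convention (a sketch of the pattern the getters later in
+	// this file follow; see GetBlockIDAtHeight): a sentinel value (nil, or
+	// ids.Empty for block IDs) is cached on a database miss, so repeated
+	// lookups of a missing entry never touch the database again, e.g.:
+	//
+	//	if blkID, ok := s.blockIDCache.Get(height); ok {
+	//		if blkID == ids.Empty {
+	//			return ids.Empty, database.ErrNotFound // cached miss
+	//		}
+	//		return blkID, nil // cached hit
+	//	}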
+	blockIDDB database.Database
+
+	// Txs section
+	// TODO: Find a way to reduce the use of these. They are not needed for
+	// verification; addedTxs is kept only for a limited window to support APIs.
+	addedTxs map[ids.ID]*txAndStatus            // map of txID -> {*txs.Tx, Status}
+	txCache  cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. If the entry is nil, it isn't in the database
+	txDB     database.Database
+
+	indexedUTXOsDB database.Database
+
+	localUptimesCache    map[ids.NodeID]map[ids.ID]*uptimes // vdrID -> subnetID -> metadata
+	modifiedLocalUptimes map[ids.NodeID]set.Set[ids.ID]     // vdrID -> subnetIDs
+	localUptimesDB       database.Database
+
+	flatValidatorWeightDiffsDB    database.Database
+	flatValidatorPublicKeyDiffsDB database.Database
+
+	// Reward UTXOs section
+	addedRewardUTXOs map[ids.ID][]*avax.UTXO            // map of txID -> []*UTXO
+	rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO
+	rewardUTXOsDB    database.Database
+}

+// STAKERS section
 func (s *state) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) {
 	return s.currentStakers.GetValidator(subnetID, nodeID)
 }
 
 func (s *state) PutCurrentValidator(staker *Staker) {
 	s.currentStakers.PutValidator(staker)
+
+	// Make sure that each new validator has an uptime entry and a delegatee
+	// reward entry. The merkle state implementations of SetUptime and
+	// SetDelegateeReward must not error.
+	err := s.SetUptime(staker.NodeID, staker.SubnetID, 0 /*duration*/, staker.StartTime)
+	if err != nil {
+		panic(err)
+	}
+	err = s.SetDelegateeReward(staker.SubnetID, staker.NodeID, 0)
+	if err != nil {
+		panic(err)
+	}
 }
 
 func (s *state) DeleteCurrentValidator(staker *Staker) {
@@ -760,80 +581,212 @@ func (s *state) GetPendingStakerIterator() (StakerIterator, error) {
 	return s.pendingStakers.GetStakerIterator(), nil
 }
 
-func (s *state) shouldInit() (bool, error) {
-	has, err := s.singletonDB.Has(initializedKey)
-	return !has, err
-}
-
-func (s *state) doneInit() error {
-	return s.singletonDB.Put(initializedKey, nil)
-}
-
-func (s *state) ShouldPrune() (bool, error) {
-	has, err := s.singletonDB.Has(prunedKey)
-	if err != nil {
-		return true, err
+func (s *state) GetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID) (uint64, error) {
+	nodeDelegateeRewards, exists := s.delegateeRewardCache[vdrID]
+	if exists {
+		delegateeReward, exists := nodeDelegateeRewards[subnetID]
+		if exists {
+			return delegateeReward, nil
+		}
 	}
 
-	// If [prunedKey] is not in [singletonDB], [PruneAndIndex()] did not finish
-	// execution.
-	if !has {
-		return true, nil
+	// try loading from the DB
	key := merkleDelegateeRewardsKey(vdrID, subnetID)
	amountBytes, err := s.merkleDB.Get(key)
	if err != nil {
		return 0, err
	}
-
-	// To ensure the db was not modified since we last ran [PruneAndIndex()], we
-	// must verify that [s.lastAccepted] is height indexed.
- blk, err := s.GetStatelessBlock(s.lastAccepted) + delegateeReward, err := database.ParseUInt64(amountBytes) if err != nil { - return true, err + return 0, err } - _, err = s.GetBlockIDAtHeight(blk.Height()) - if err == database.ErrNotFound { - return true, nil + if _, found := s.delegateeRewardCache[vdrID]; !found { + s.delegateeRewardCache[vdrID] = make(map[ids.ID]uint64) } - - return false, err + s.delegateeRewardCache[vdrID][subnetID] = delegateeReward + return delegateeReward, nil } -func (s *state) donePrune() error { - return s.singletonDB.Put(prunedKey, nil) -} +func (s *state) SetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID, amount uint64) error { + nodeDelegateeRewards, exists := s.delegateeRewardCache[vdrID] + if !exists { + nodeDelegateeRewards = make(map[ids.ID]uint64) + s.delegateeRewardCache[vdrID] = nodeDelegateeRewards + } + nodeDelegateeRewards[subnetID] = amount + + // track diff + updatedDelegateeRewards, ok := s.modifiedDelegateeReward[vdrID] + if !ok { + updatedDelegateeRewards = set.Set[ids.ID]{} + s.modifiedDelegateeReward[vdrID] = updatedDelegateeRewards + } + updatedDelegateeRewards.Add(subnetID) + return nil +} + +// UTXOs section +func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { + if utxo, exists := s.modifiedUTXOs[utxoID]; exists { + if utxo == nil { + return nil, database.ErrNotFound + } + return utxo, nil + } + if utxo, found := s.utxoCache.Get(utxoID); found { + if utxo == nil { + return nil, database.ErrNotFound + } + return utxo, nil + } + + key := merkleUtxoIDKey(utxoID) + + switch bytes, err := s.merkleDB.Get(key); err { + case nil: + utxo := &avax.UTXO{} + if _, err := txs.GenesisCodec.Unmarshal(bytes, utxo); err != nil { + return nil, err + } + s.utxoCache.Put(utxoID, utxo) + return utxo, nil + + case database.ErrNotFound: + s.utxoCache.Put(utxoID, nil) + return nil, database.ErrNotFound + + default: + return nil, err + } +} + +func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { + var ( + prefix = slices.Clone(addr) + key = merkleUtxoIndexKey(addr, start) + ) + + iter := s.indexedUTXOsDB.NewIteratorWithStartAndPrefix(key, prefix) + defer iter.Release() + + utxoIDs := []ids.ID(nil) + for len(utxoIDs) < limit && iter.Next() { + itAddr, utxoID := splitUtxoIndexKey(iter.Key()) + if !bytes.Equal(itAddr, addr) { + break + } + if utxoID == start { + continue + } + + start = ids.Empty + utxoIDs = append(utxoIDs, utxoID) + } + return utxoIDs, iter.Error() +} + +func (s *state) AddUTXO(utxo *avax.UTXO) { + s.modifiedUTXOs[utxo.InputID()] = utxo +} + +func (s *state) DeleteUTXO(utxoID ids.ID) { + s.modifiedUTXOs[utxoID] = nil +} + +// METADATA Section +func (s *state) GetTimestamp() time.Time { + return s.chainTime +} + +func (s *state) SetTimestamp(tm time.Time) { + s.chainTime = tm +} + +func (s *state) GetLastAccepted() ids.ID { + return s.lastAcceptedBlkID +} + +func (s *state) SetLastAccepted(lastAccepted ids.ID) { + s.lastAcceptedBlkID = lastAccepted +} + +func (s *state) SetHeight(height uint64) { + s.lastAcceptedHeight = height +} + +func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { + supply, ok := s.modifiedSupplies[subnetID] + if ok { + return supply, nil + } + cachedSupply, ok := s.suppliesCache.Get(subnetID) + if ok { + if cachedSupply == nil { + return 0, database.ErrNotFound + } + return *cachedSupply, nil + } + + key := merkleSuppliesKey(subnetID) + + switch supplyBytes, err := s.merkleDB.Get(key); err { + case nil: + supply, err := database.ParseUInt64(supplyBytes) + if err != nil { 
+ return 0, fmt.Errorf("failed parsing supply: %w", err) + } + s.suppliesCache.Put(subnetID, &supply) + return supply, nil + + case database.ErrNotFound: + s.suppliesCache.Put(subnetID, nil) + return 0, database.ErrNotFound + + default: + return 0, err + } +} + +func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { + s.modifiedSupplies[subnetID] = cs +} + +// SUBNETS Section +type fxOwnerAndSize struct { + owner fx.Owner + size int +} func (s *state) GetSubnets() ([]*txs.Tx, error) { - if s.cachedSubnets != nil { - return s.cachedSubnets, nil + // Note: we want all subnets, so we don't look at addedSubnets + // which are only part of them + if s.permissionedSubnetCache != nil { + return s.permissionedSubnetCache, nil } - subnetDBIt := s.subnetDB.NewIterator() + subnets := make([]*txs.Tx, 0) + subnetDBIt := s.merkleDB.NewIteratorWithPrefix(permissionedSubnetSectionPrefix) defer subnetDBIt.Release() - txs := []*txs.Tx(nil) for subnetDBIt.Next() { - subnetIDBytes := subnetDBIt.Key() - subnetID, err := ids.ToID(subnetIDBytes) - if err != nil { - return nil, err - } - subnetTx, _, err := s.GetTx(subnetID) + subnetTxBytes := subnetDBIt.Value() + subnetTx, err := txs.Parse(txs.GenesisCodec, subnetTxBytes) if err != nil { return nil, err } - txs = append(txs, subnetTx) + subnets = append(subnets, subnetTx) } if err := subnetDBIt.Error(); err != nil { return nil, err } - txs = append(txs, s.addedSubnets...) - s.cachedSubnets = txs - return txs, nil + subnets = append(subnets, s.addedPermissionedSubnets...) + s.permissionedSubnetCache = subnets + return subnets, nil } func (s *state) AddSubnet(createSubnetTx *txs.Tx) { - s.addedSubnets = append(s.addedSubnets, createSubnetTx) - if s.cachedSubnets != nil { - s.cachedSubnets = append(s.cachedSubnets, createSubnetTx) - } + s.addedPermissionedSubnets = append(s.addedPermissionedSubnets, createSubnetTx) } func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { @@ -848,7 +801,8 @@ func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { return ownerAndSize.owner, nil } - ownerBytes, err := s.subnetOwnerDB.Get(subnetID[:]) + subnetIDKey := merkleSubnetOwnersKey(subnetID) + ownerBytes, err := s.merkleDB.Get(subnetIDKey) if err == nil { var owner fx.Owner if _, err := block.GenesisCodec.Unmarshal(ownerBytes, &owner); err != nil { @@ -886,86 +840,99 @@ func (s *state) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { } func (s *state) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { - if tx, exists := s.transformedSubnets[subnetID]; exists { + if tx, exists := s.addedElasticSubnets[subnetID]; exists { return tx, nil } - if tx, cached := s.transformedSubnetCache.Get(subnetID); cached { + if tx, cached := s.elasticSubnetCache.Get(subnetID); cached { if tx == nil { return nil, database.ErrNotFound } return tx, nil } - transformSubnetTxID, err := database.GetID(s.transformedSubnetDB, subnetID[:]) - if err == database.ErrNotFound { - s.transformedSubnetCache.Put(subnetID, nil) + key := merkleElasticSubnetKey(subnetID) + transformSubnetTxBytes, err := s.merkleDB.Get(key) + switch err { + case nil: + transformSubnetTx, err := txs.Parse(txs.GenesisCodec, transformSubnetTxBytes) + if err != nil { + return nil, err + } + s.elasticSubnetCache.Put(subnetID, transformSubnetTx) + return transformSubnetTx, nil + + case database.ErrNotFound: + s.elasticSubnetCache.Put(subnetID, nil) return nil, database.ErrNotFound - } - if err != nil { - return nil, err - } - transformSubnetTx, _, err := s.GetTx(transformSubnetTxID) - if err != nil 
{ + default: return nil, err } - s.transformedSubnetCache.Put(subnetID, transformSubnetTx) - return transformSubnetTx, nil } func (s *state) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { transformSubnetTx := transformSubnetTxIntf.Unsigned.(*txs.TransformSubnetTx) - s.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf + s.addedElasticSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf } +// CHAINS Section func (s *state) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { if chains, cached := s.chainCache.Get(subnetID); cached { return chains, nil } - chainDB := s.getChainDB(subnetID) - chainDBIt := chainDB.NewIterator() - defer chainDBIt.Release() + chains := make([]*txs.Tx, 0) + + prefix := merkleChainPrefix(subnetID) - txs := []*txs.Tx(nil) + chainDBIt := s.merkleDB.NewIteratorWithPrefix(prefix) + defer chainDBIt.Release() for chainDBIt.Next() { - chainIDBytes := chainDBIt.Key() - chainID, err := ids.ToID(chainIDBytes) - if err != nil { - return nil, err - } - chainTx, _, err := s.GetTx(chainID) + chainTxBytes := chainDBIt.Value() + chainTx, err := txs.Parse(txs.GenesisCodec, chainTxBytes) if err != nil { return nil, err } - txs = append(txs, chainTx) + chains = append(chains, chainTx) } if err := chainDBIt.Error(); err != nil { return nil, err } - txs = append(txs, s.addedChains[subnetID]...) - s.chainCache.Put(subnetID, txs) - return txs, nil + chains = append(chains, s.addedChains[subnetID]...) + s.chainCache.Put(subnetID, chains) + return chains, nil } func (s *state) AddChain(createChainTxIntf *txs.Tx) { createChainTx := createChainTxIntf.Unsigned.(*txs.CreateChainTx) subnetID := createChainTx.SubnetID + s.addedChains[subnetID] = append(s.addedChains[subnetID], createChainTxIntf) - if chains, cached := s.chainCache.Get(subnetID); cached { - chains = append(chains, createChainTxIntf) - s.chainCache.Put(subnetID, chains) +} + +// TXs Section +type txBytesAndStatus struct { + Tx []byte `serialize:"true"` + Status status.Status `serialize:"true"` +} + +type txAndStatus struct { + tx *txs.Tx + status status.Status +} + +func txSize(_ ids.ID, tx *txs.Tx) int { + if tx == nil { + return ids.IDLen + constants.PointerOverhead } + return ids.IDLen + len(tx.Bytes()) + constants.PointerOverhead } -func (s *state) getChainDB(subnetID ids.ID) linkeddb.LinkedDB { - if chainDB, cached := s.chainDBCache.Get(subnetID); cached { - return chainDB +func txAndStatusSize(_ ids.ID, t *txAndStatus) int { + if t == nil { + return ids.IDLen + constants.PointerOverhead } - rawChainDB := prefixdb.New(subnetID[:], s.chainDB) - chainDB := linkeddb.NewDefault(rawChainDB) - s.chainDBCache.Put(subnetID, chainDB) - return chainDB + return ids.IDLen + len(t.tx.Bytes()) + wrappers.IntLen + 2*constants.PointerOverhead } func (s *state) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { @@ -978,40 +945,189 @@ func (s *state) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { } return tx.tx, tx.status, nil } + txBytes, err := s.txDB.Get(txID[:]) - if err == database.ErrNotFound { + switch err { + case nil: + stx := txBytesAndStatus{} + if _, err := txs.GenesisCodec.Unmarshal(txBytes, &stx); err != nil { + return nil, status.Unknown, err + } + + tx, err := txs.Parse(txs.GenesisCodec, stx.Tx) + if err != nil { + return nil, status.Unknown, err + } + + ptx := &txAndStatus{ + tx: tx, + status: stx.Status, + } + + s.txCache.Put(txID, ptx) + return ptx.tx, ptx.status, nil + + case database.ErrNotFound: s.txCache.Put(txID, nil) return nil, status.Unknown, database.ErrNotFound - } else if err != 
nil {
+
+	default:
 		return nil, status.Unknown, err
 	}
+}
+
+func (s *state) AddTx(tx *txs.Tx, status status.Status) {
+	s.addedTxs[tx.ID()] = &txAndStatus{
+		tx:     tx,
+		status: status,
+	}
+}
 
-	stx := txBytesAndStatus{}
-	if _, err := txs.GenesisCodec.Unmarshal(txBytes, &stx); err != nil {
-		return nil, status.Unknown, err
+// BLOCKs Section
+func blockSize(_ ids.ID, blk block.Block) int {
+	if blk == nil {
+		return ids.IDLen + constants.PointerOverhead
+	}
+	return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead
+}
+
+func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) {
+	if blk, exists := s.addedBlocks[blockID]; exists {
+		return blk, nil
+	}
+
+	if blk, cached := s.blockCache.Get(blockID); cached {
+		if blk == nil {
+			return nil, database.ErrNotFound
+		}
+
+		return blk, nil
+	}
+
+	blkBytes, err := s.blockDB.Get(blockID[:])
+	switch err {
+	case nil:
+		// Note: stored blocks are verified, so it's safe to unmarshal them with GenesisCodec
+		blk, err := block.Parse(block.GenesisCodec, blkBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		s.blockCache.Put(blockID, blk)
+		return blk, nil
+
+	case database.ErrNotFound:
+		s.blockCache.Put(blockID, nil)
+		return nil, database.ErrNotFound
+
+	default:
+		return nil, err
+	}
+}
+
+func (s *state) AddStatelessBlock(block block.Block) {
+	s.addedBlocks[block.ID()] = block
+}
+
+func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) {
+	if blkID, exists := s.addedBlockIDs[height]; exists {
+		return blkID, nil
 	}
 
+	if blkID, cached := s.blockIDCache.Get(height); cached {
+		if blkID == ids.Empty {
+			return ids.Empty, database.ErrNotFound
+		}
+
+		return blkID, nil
+	}
+
+	heightKey := database.PackUInt64(height)
 
-	tx, err := txs.Parse(txs.GenesisCodec, stx.Tx)
+	blkID, err := database.GetID(s.blockIDDB, heightKey)
+	if err == database.ErrNotFound {
+		s.blockIDCache.Put(height, ids.Empty)
+		return ids.Empty, database.ErrNotFound
+	}
 	if err != nil {
-		return nil, status.Unknown, err
+		return ids.Empty, err
 	}
 
-	ptx := &txAndStatus{
-		tx:     tx,
-		status: stx.Status,
+	s.blockIDCache.Put(height, blkID)
+	return blkID, nil
+}
+
+func (*state) ShouldPrune() (bool, error) {
+	return false, nil // Nothing to do
+}
+
+func (*state) PruneAndIndex(sync.Locker, logging.Logger) error {
+	return nil // Nothing to do
+}
+
+// UPTIMES SECTION
+func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) {
+	nodeUptimes, exists := s.localUptimesCache[vdrID]
+	if exists {
+		uptime, exists := nodeUptimes[subnetID]
+		if exists {
+			return uptime.Duration, uptime.lastUpdated, nil
+		}
	}
 
-	s.txCache.Put(txID, ptx)
-	return ptx.tx, ptx.status, nil
+	// try loading from DB
+	key := merkleLocalUptimesKey(vdrID, subnetID)
+	uptimeBytes, err := s.localUptimesDB.Get(key)
+	switch err {
+	case nil:
+		upTm := &uptimes{}
+		if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, upTm); err != nil {
+			return 0, time.Time{}, err
+		}
+		upTm.lastUpdated = time.Unix(int64(upTm.LastUpdated), 0)
+		// Reuse any map already cached for this node so entries for its
+		// other subnets are not dropped.
+		if nodeUptimes == nil {
+			nodeUptimes = make(map[ids.ID]*uptimes)
+			s.localUptimesCache[vdrID] = nodeUptimes
+		}
+		nodeUptimes[subnetID] = upTm
+		return upTm.Duration, upTm.lastUpdated, nil
+
+	case database.ErrNotFound:
+		// no local data for this staker uptime
+		return 0, time.Time{}, database.ErrNotFound
+	default:
+		return 0, time.Time{}, err
+	}
 }
 
-func (s *state) AddTx(tx *txs.Tx, status status.Status) {
-	s.addedTxs[tx.ID()] = &txAndStatus{
-		tx:     tx,
-		status: status,
+func (s *state) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration
time.Duration, lastUpdated time.Time) error { + nodeUptimes, exists := s.localUptimesCache[vdrID] + if !exists { + nodeUptimes = make(map[ids.ID]*uptimes) + s.localUptimesCache[vdrID] = nodeUptimes + } + + nodeUptimes[subnetID] = &uptimes{ + Duration: upDuration, + LastUpdated: uint64(lastUpdated.Unix()), + lastUpdated: lastUpdated, + } + + // track diff + updatedNodeUptimes, ok := s.modifiedLocalUptimes[vdrID] + if !ok { + updatedNodeUptimes = set.Set[ids.ID]{} + s.modifiedLocalUptimes[vdrID] = updatedNodeUptimes + } + updatedNodeUptimes.Add(subnetID) + return nil +} + +func (s *state) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { + staker, err := s.GetCurrentValidator(subnetID, nodeID) + if err != nil { + return time.Time{}, err } + return staker.StartTime, nil } +// REWARD UTXOs SECTION func (s *state) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { if utxos, exists := s.addedRewardUTXOs[txID]; exists { return utxos, nil @@ -1020,7 +1136,7 @@ func (s *state) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { return utxos, nil } - rawTxDB := prefixdb.New(txID[:], s.rewardUTXODB) + rawTxDB := prefixdb.New(txID[:], s.rewardUTXOsDB) txDB := linkeddb.NewDefault(rawTxDB) it := txDB.NewIterator() defer it.Release() @@ -1045,89 +1161,63 @@ func (s *state) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { s.addedRewardUTXOs[txID] = append(s.addedRewardUTXOs[txID], utxo) } -func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { - if utxo, exists := s.modifiedUTXOs[utxoID]; exists { - if utxo == nil { - return nil, database.ErrNotFound - } - return utxo, nil - } - return s.utxoState.GetUTXO(utxoID) +// VALIDATORS Section +type ValidatorWeightDiff struct { + Decrease bool `serialize:"true"` + Amount uint64 `serialize:"true"` } -func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { - return s.utxoState.UTXOIDs(addr, start, limit) -} - -func (s *state) AddUTXO(utxo *avax.UTXO) { - s.modifiedUTXOs[utxo.InputID()] = utxo -} - -func (s *state) DeleteUTXO(utxoID ids.ID) { - s.modifiedUTXOs[utxoID] = nil -} - -func (s *state) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { - staker, err := s.currentStakers.GetValidator(subnetID, nodeID) - if err != nil { - return time.Time{}, err - } - return staker.StartTime, nil -} - -func (s *state) GetTimestamp() time.Time { - return s.timestamp -} - -func (s *state) SetTimestamp(tm time.Time) { - s.timestamp = tm -} - -func (s *state) GetLastAccepted() ids.ID { - return s.lastAccepted -} - -func (s *state) SetLastAccepted(lastAccepted ids.ID) { - s.lastAccepted = lastAccepted -} - -func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { - if subnetID == constants.PrimaryNetworkID { - return s.currentSupply, nil +func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { + if v.Decrease == negative { + var err error + v.Amount, err = safemath.Add64(v.Amount, amount) + return err } - supply, ok := s.modifiedSupplies[subnetID] - if ok { - return supply, nil + if v.Amount > amount { + v.Amount -= amount + } else { + v.Amount = safemath.AbsDiff(v.Amount, amount) + v.Decrease = negative } + return nil +} - cachedSupply, ok := s.supplyCache.Get(subnetID) - if ok { - if cachedSupply == nil { - return 0, database.ErrNotFound +func applyWeightDiff( + vdrs map[ids.NodeID]*validators.GetValidatorOutput, + nodeID ids.NodeID, + weightDiff *ValidatorWeightDiff, +) error { + vdr, ok := vdrs[nodeID] + if !ok { + // This node isn't in the current validator set. 
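+		// Diffs are applied while walking heights away from the current
+		// validator set, so a node that is absent now may still have had
+		// weight at the requested height. Materialize a zero-weight entry
+		// here and let the weight math below reconstruct its prior weight.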
+ vdr = &validators.GetValidatorOutput{ + NodeID: nodeID, } - return *cachedSupply, nil + vdrs[nodeID] = vdr } - supply, err := database.GetUInt64(s.supplyDB, subnetID[:]) - if err == database.ErrNotFound { - s.supplyCache.Put(subnetID, nil) - return 0, database.ErrNotFound + // The weight of this node changed at this block. + var err error + if weightDiff.Decrease { + // The validator's weight was decreased at this block, so in the + // prior block it was higher. + vdr.Weight, err = safemath.Add64(vdr.Weight, weightDiff.Amount) + } else { + // The validator's weight was increased at this block, so in the + // prior block it was lower. + vdr.Weight, err = safemath.Sub(vdr.Weight, weightDiff.Amount) } if err != nil { - return 0, err + return err } - s.supplyCache.Put(subnetID, &supply) - return supply, nil -} - -func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { - if subnetID == constants.PrimaryNetworkID { - s.currentSupply = cs - } else { - s.modifiedSupplies[subnetID] = cs + if vdr.Weight == 0 { + // The validator's weight was 0 before this block so they weren't in the + // validator set. + delete(vdrs, nodeID) } + return nil } func (s *state) ApplyValidatorWeightDiffs( @@ -1143,10 +1233,7 @@ func (s *state) ApplyValidatorWeightDiffs( ) defer diffIter.Release() - prevHeight := startHeight + 1 - // TODO: Remove the index continuity checks once we are guaranteed nodes can - // not rollback to not support the new indexing mechanism. - for diffIter.Next() && s.indexedHeights != nil && s.indexedHeights.LowerBound <= endHeight { + for diffIter.Next() { if err := ctx.Err(); err != nil { return err } @@ -1161,8 +1248,6 @@ func (s *state) ApplyValidatorWeightDiffs( return diffIter.Error() } - prevHeight = parsedHeight - weightDiff, err := unmarshalWeightDiff(diffIter.Value()) if err != nil { return err @@ -1172,87 +1257,8 @@ func (s *state) ApplyValidatorWeightDiffs( return err } } - if err := diffIter.Error(); err != nil { - return err - } - - // TODO: Remove this once it is assumed that all subnet validators have - // adopted the new indexing. - for height := prevHeight - 1; height >= endHeight; height-- { - if err := ctx.Err(); err != nil { - return err - } - - prefixStruct := heightWithSubnet{ - Height: height, - SubnetID: subnetID, - } - prefixBytes, err := block.GenesisCodec.Marshal(block.Version, prefixStruct) - if err != nil { - return err - } - - rawDiffDB := prefixdb.New(prefixBytes, s.nestedValidatorWeightDiffsDB) - diffDB := linkeddb.NewDefault(rawDiffDB) - diffIter := diffDB.NewIterator() - defer diffIter.Release() - - for diffIter.Next() { - nodeID, err := ids.ToNodeID(diffIter.Key()) - if err != nil { - return err - } - - weightDiff := ValidatorWeightDiff{} - _, err = block.GenesisCodec.Unmarshal(diffIter.Value(), &weightDiff) - if err != nil { - return err - } - - if err := applyWeightDiff(validators, nodeID, &weightDiff); err != nil { - return err - } - } - } - - return nil -} - -func applyWeightDiff( - vdrs map[ids.NodeID]*validators.GetValidatorOutput, - nodeID ids.NodeID, - weightDiff *ValidatorWeightDiff, -) error { - vdr, ok := vdrs[nodeID] - if !ok { - // This node isn't in the current validator set. - vdr = &validators.GetValidatorOutput{ - NodeID: nodeID, - } - vdrs[nodeID] = vdr - } - // The weight of this node changed at this block. - var err error - if weightDiff.Decrease { - // The validator's weight was decreased at this block, so in the - // prior block it was higher. 
- vdr.Weight, err = safemath.Add64(vdr.Weight, weightDiff.Amount) - } else { - // The validator's weight was increased at this block, so in the - // prior block it was lower. - vdr.Weight, err = safemath.Sub(vdr.Weight, weightDiff.Amount) - } - if err != nil { - return err - } - - if vdr.Weight == 0 { - // The validator's weight was 0 before this block so they weren't in the - // validator set. - delete(vdrs, nodeID) - } - return nil + return diffIter.Error() } func (s *state) ApplyValidatorPublicKeyDiffs( @@ -1293,927 +1299,508 @@ func (s *state) ApplyValidatorPublicKeyDiffs( continue } - vdr.PublicKey = bls.DeserializePublicKey(pkBytes) + vdr.PublicKey = new(bls.PublicKey).Deserialize(pkBytes) } - - // Note: this does not fallback to the linkeddb index because the linkeddb - // index does not contain entries for when to remove the public key. - // - // Nodes may see inconsistent public keys for heights before the new public - // key index was populated. return diffIter.Error() } -func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error { - genesisBlkID := genesisBlk.ID() - s.SetLastAccepted(genesisBlkID) - s.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0)) - s.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply) - s.AddStatelessBlock(genesisBlk) - - // Persist UTXOs that exist at genesis - for _, utxo := range genesis.UTXOs { - avaxUTXO := utxo.UTXO - s.AddUTXO(&avaxUTXO) - } - - // Persist primary network validator set at genesis - for _, vdrTx := range genesis.Validators { - tx, ok := vdrTx.Unsigned.(*txs.AddValidatorTx) - if !ok { - return fmt.Errorf("expected tx type *txs.AddValidatorTx but got %T", vdrTx.Unsigned) - } - - stakeAmount := tx.Validator.Wght - stakeDuration := tx.Validator.Duration() - currentSupply, err := s.GetCurrentSupply(constants.PrimaryNetworkID) - if err != nil { - return err - } - - potentialReward := s.rewards.Calculate( - stakeDuration, - stakeAmount, - currentSupply, - ) - newCurrentSupply, err := safemath.Add64(currentSupply, potentialReward) - if err != nil { - return err - } - - staker, err := NewCurrentStaker(vdrTx.ID(), tx, potentialReward) - if err != nil { - return err - } +// DB Operations +func (s *state) Abort() { + s.baseDB.Abort() +} - s.PutCurrentValidator(staker) - s.AddTx(vdrTx, status.Committed) - s.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply) +func (s *state) Commit() error { + defer s.Abort() + batch, err := s.CommitBatch() + if err != nil { + return err } + return batch.Write() +} - for _, chain := range genesis.Chains { - unsignedChain, ok := chain.Unsigned.(*txs.CreateChainTx) - if !ok { - return fmt.Errorf("expected tx type *txs.CreateChainTx but got %T", chain.Unsigned) - } - - // Ensure all chains that the genesis bytes say to create have the right - // network ID - if unsignedChain.NetworkID != s.ctx.NetworkID { - return avax.ErrWrongNetworkID - } - - s.AddChain(chain) - s.AddTx(chain, status.Committed) +func (s *state) CommitBatch() (database.Batch, error) { + // updateValidators is set to true here so that the validator manager is + // kept up to date with the last accepted state. + if err := s.write(true /*updateValidators*/, s.lastAcceptedHeight); err != nil { + return nil, err } + return s.baseDB.CommitBatch() +} - // updateValidators is set to false here to maintain the invariant that the - // primary network's validator set is empty before the validator sets are - // initialized. 
- return s.write(false /*=updateValidators*/, 0) +func (*state) Checksum() ids.ID { + return ids.Empty } -// Load pulls data previously stored on disk that is expected to be in memory. -func (s *state) load() error { +func (s *state) Close() error { return utils.Err( - s.loadMetadata(), - s.loadCurrentValidators(), - s.loadPendingValidators(), - s.initValidatorSets(), + s.flatValidatorWeightDiffsDB.Close(), + s.flatValidatorPublicKeyDiffsDB.Close(), + s.localUptimesDB.Close(), + s.indexedUTXOsDB.Close(), + s.txDB.Close(), + s.blockDB.Close(), + s.blockIDDB.Close(), + s.merkleDB.Close(), + s.baseMerkleDB.Close(), ) } -func (s *state) loadMetadata() error { - timestamp, err := database.GetTimestamp(s.singletonDB, timestampKey) - if err != nil { - return err - } - s.persistedTimestamp = timestamp - s.SetTimestamp(timestamp) - - currentSupply, err := database.GetUInt64(s.singletonDB, currentSupplyKey) - if err != nil { - return err - } - s.persistedCurrentSupply = currentSupply - s.SetCurrentSupply(constants.PrimaryNetworkID, currentSupply) - - lastAccepted, err := database.GetID(s.singletonDB, lastAcceptedKey) - if err != nil { - return err - } - s.persistedLastAccepted = lastAccepted - s.lastAccepted = lastAccepted - - // Lookup the most recently indexed range on disk. If we haven't started - // indexing the weights, then we keep the indexed heights as nil. - indexedHeightsBytes, err := s.singletonDB.Get(heightsIndexedKey) - if err == database.ErrNotFound { - return nil - } +func (s *state) write(updateValidators bool, height uint64) error { + currentData, weightDiffs, blsKeyDiffs, valSetDiff, err := s.processCurrentStakers() if err != nil { return err } - - indexedHeights := &heightRange{} - _, err = block.GenesisCodec.Unmarshal(indexedHeightsBytes, indexedHeights) + pendingData, err := s.processPendingStakers() if err != nil { return err } - // If the indexed range is not up to date, then we will act as if the range - // doesn't exist. 
- lastAcceptedBlock, err := s.GetStatelessBlock(lastAccepted) - if err != nil { - return err - } - if indexedHeights.UpperBound != lastAcceptedBlock.Height() { - return nil - } - s.indexedHeights = indexedHeights - return nil + return utils.Err( + s.writeMerkleState(currentData, pendingData), + s.writeBlocks(), + s.writeTxs(), + s.writeLocalUptimes(), + s.writeWeightDiffs(height, weightDiffs), + s.writeBlsKeyDiffs(height, blsKeyDiffs), + s.writeRewardUTXOs(), + s.updateValidatorSet(updateValidators, valSetDiff, weightDiffs), + ) } -func (s *state) loadCurrentValidators() error { - s.currentStakers = newBaseStakers() +func (s *state) processCurrentStakers() ( + map[ids.ID]*stakersData, + map[weightDiffKey]*ValidatorWeightDiff, + map[ids.NodeID]*bls.PublicKey, + map[weightDiffKey]*diffValidator, + error, +) { + var ( + outputStakers = make(map[ids.ID]*stakersData) + outputWeights = make(map[weightDiffKey]*ValidatorWeightDiff) + outputBlsKey = make(map[ids.NodeID]*bls.PublicKey) + outputValSet = make(map[weightDiffKey]*diffValidator) + ) - validatorIt := s.currentValidatorList.NewIterator() - defer validatorIt.Release() - for validatorIt.Next() { - txIDBytes := validatorIt.Key() - txID, err := ids.ToID(txIDBytes) - if err != nil { - return err - } - tx, _, err := s.GetTx(txID) - if err != nil { - return err - } + for subnetID, subnetValidatorDiffs := range s.currentStakers.validatorDiffs { + delete(s.currentStakers.validatorDiffs, subnetID) + for nodeID, validatorDiff := range subnetValidatorDiffs { + weightKey := weightDiffKey{ + subnetID: subnetID, + nodeID: nodeID, + } + outputValSet[weightKey] = validatorDiff - metadataBytes := validatorIt.Value() - metadata := &validatorMetadata{ - txID: txID, - // Note: we don't provide [LastUpdated] here because we expect it to - // always be present on disk. - } - if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { - return err - } + // make sure there is an entry for delegators even in case + // there are no validators modified. + outputWeights[weightKey] = &ValidatorWeightDiff{ + Decrease: validatorDiff.validatorStatus == deleted, + } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } + switch validatorDiff.validatorStatus { + case added: + var ( + txID = validatorDiff.validator.TxID + potentialReward = validatorDiff.validator.PotentialReward + weight = validatorDiff.validator.Weight + blkKey = validatorDiff.validator.PublicKey + ) + tx, _, err := s.GetTx(txID) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed loading current validator tx, %w", err) + } - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) - if err != nil { - return err - } + outputStakers[txID] = &stakersData{ + TxBytes: tx.Bytes(), + PotentialReward: potentialReward, + } + outputWeights[weightKey].Amount = weight - validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - validator.validator = staker + if blkKey != nil { + // Record that the public key for the validator is being + // added. This means the prior value for the public key was + // nil. 
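+					// A nil entry records that no public key existed for this
+					// node before this height, so rewinding past this diff can
+					// clear the key instead of restoring a value.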
+ outputBlsKey[nodeID] = nil + } - s.currentStakers.stakers.ReplaceOrInsert(staker) + case deleted: + var ( + txID = validatorDiff.validator.TxID + weight = validatorDiff.validator.Weight + blkKey = validatorDiff.validator.PublicKey + ) - s.validatorState.LoadValidatorMetadata(staker.NodeID, staker.SubnetID, metadata) - } + outputStakers[txID] = &stakersData{ + TxBytes: nil, + } + outputWeights[weightKey].Amount = weight - subnetValidatorIt := s.currentSubnetValidatorList.NewIterator() - defer subnetValidatorIt.Release() - for subnetValidatorIt.Next() { - txIDBytes := subnetValidatorIt.Key() - txID, err := ids.ToID(txIDBytes) - if err != nil { - return err - } - tx, _, err := s.GetTx(txID) - if err != nil { - return err - } - - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - - metadataBytes := subnetValidatorIt.Value() - metadata := &validatorMetadata{ - txID: txID, - // use the start time as the fallback value - // in case it's not stored in the database - LastUpdated: uint64(stakerTx.StartTime().Unix()), - } - if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { - return err - } - - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) - if err != nil { - return err - } - validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - validator.validator = staker - - s.currentStakers.stakers.ReplaceOrInsert(staker) - - s.validatorState.LoadValidatorMetadata(staker.NodeID, staker.SubnetID, metadata) - } - - delegatorIt := s.currentDelegatorList.NewIterator() - defer delegatorIt.Release() - - subnetDelegatorIt := s.currentSubnetDelegatorList.NewIterator() - defer subnetDelegatorIt.Release() - - for _, delegatorIt := range []database.Iterator{delegatorIt, subnetDelegatorIt} { - for delegatorIt.Next() { - txIDBytes := delegatorIt.Key() - txID, err := ids.ToID(txIDBytes) - if err != nil { - return err - } - tx, _, err := s.GetTx(txID) - if err != nil { - return err + if blkKey != nil { + // Record that the public key for the validator is being + // removed. This means we must record the prior value of the + // public key. 
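+					// The stored prior value is what a rewind re-installs for
+					// this node: ApplyValidatorPublicKeyDiffs (above)
+					// deserializes it back into a *bls.PublicKey when
+					// reconstructing an older validator set.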
+ outputBlsKey[nodeID] = blkKey + } } - metadata := &delegatorMetadata{ - txID: txID, - } - err = parseDelegatorMetadata(delegatorIt.Value(), metadata) - if err != nil { - return err - } + addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) + defer addedDelegatorIterator.Release() + for addedDelegatorIterator.Next() { + staker := addedDelegatorIterator.Value() + tx, _, err := s.GetTx(staker.TxID) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed loading current delegator tx, %w", err) + } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) + outputStakers[staker.TxID] = &stakersData{ + TxBytes: tx.Bytes(), + PotentialReward: staker.PotentialReward, + } + if err := outputWeights[weightKey].Add(false, staker.Weight); err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to increase node weight diff: %w", err) + } } - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) - if err != nil { - return err - } + for _, staker := range validatorDiff.deletedDelegators { + txID := staker.TxID - validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - if validator.delegators == nil { - validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) + outputStakers[txID] = &stakersData{ + TxBytes: nil, + } + if err := outputWeights[weightKey].Add(true, staker.Weight); err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to decrease node weight diff: %w", err) + } } - validator.delegators.ReplaceOrInsert(staker) - - s.currentStakers.stakers.ReplaceOrInsert(staker) } } - - return utils.Err( - validatorIt.Error(), - subnetValidatorIt.Error(), - delegatorIt.Error(), - subnetDelegatorIt.Error(), - ) + return outputStakers, outputWeights, outputBlsKey, outputValSet, nil } -func (s *state) loadPendingValidators() error { - s.pendingStakers = newBaseStakers() - - validatorIt := s.pendingValidatorList.NewIterator() - defer validatorIt.Release() - - subnetValidatorIt := s.pendingSubnetValidatorList.NewIterator() - defer subnetValidatorIt.Release() - - for _, validatorIt := range []database.Iterator{validatorIt, subnetValidatorIt} { - for validatorIt.Next() { - txIDBytes := validatorIt.Key() - txID, err := ids.ToID(txIDBytes) - if err != nil { - return err - } - tx, _, err := s.GetTx(txID) - if err != nil { - return err - } - - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - - staker, err := NewPendingStaker(txID, stakerTx) - if err != nil { - return err - } - - validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - validator.validator = staker - - s.pendingStakers.stakers.ReplaceOrInsert(staker) - } - } - - delegatorIt := s.pendingDelegatorList.NewIterator() - defer delegatorIt.Release() - - subnetDelegatorIt := s.pendingSubnetDelegatorList.NewIterator() - defer subnetDelegatorIt.Release() - - for _, delegatorIt := range []database.Iterator{delegatorIt, subnetDelegatorIt} { - for delegatorIt.Next() { - txIDBytes := delegatorIt.Key() - txID, err := ids.ToID(txIDBytes) - if err != nil { - return err - } - tx, _, err := s.GetTx(txID) - if err != nil { - return err - } - - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - - staker, err := NewPendingStaker(txID, stakerTx) - if err != nil { - return err - } - - validator := 
s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - if validator.delegators == nil { - validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) +func (s *state) processPendingStakers() (map[ids.ID]*stakersData, error) { + output := make(map[ids.ID]*stakersData) + for subnetID, subnetValidatorDiffs := range s.pendingStakers.validatorDiffs { + delete(s.pendingStakers.validatorDiffs, subnetID) + for _, validatorDiff := range subnetValidatorDiffs { + // validatorDiff.validator is not guaranteed to be non-nil here. + // Access it only if validatorDiff.validatorStatus is added or deleted + switch validatorDiff.validatorStatus { + case added: + txID := validatorDiff.validator.TxID + tx, _, err := s.GetTx(txID) + if err != nil { + return nil, fmt.Errorf("failed loading pending validator tx, %w", err) + } + output[txID] = &stakersData{ + TxBytes: tx.Bytes(), + PotentialReward: 0, + } + case deleted: + txID := validatorDiff.validator.TxID + output[txID] = &stakersData{ + TxBytes: nil, + } } - validator.delegators.ReplaceOrInsert(staker) - s.pendingStakers.stakers.ReplaceOrInsert(staker) - } - } - - return utils.Err( - validatorIt.Error(), - subnetValidatorIt.Error(), - delegatorIt.Error(), - subnetDelegatorIt.Error(), - ) -} - -// Invariant: initValidatorSets requires loadCurrentValidators to have already -// been called. -func (s *state) initValidatorSets() error { - for subnetID, validators := range s.currentStakers.validators { - if s.validators.Count(subnetID) != 0 { - // Enforce the invariant that the validator set is empty here. - return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) - } - - for nodeID, validator := range validators { - validatorStaker := validator.validator - if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { - return err + addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) + defer addedDelegatorIterator.Release() + for addedDelegatorIterator.Next() { + staker := addedDelegatorIterator.Value() + tx, _, err := s.GetTx(staker.TxID) + if err != nil { + return nil, fmt.Errorf("failed loading pending delegator tx, %w", err) + } + output[staker.TxID] = &stakersData{ + TxBytes: tx.Bytes(), + PotentialReward: 0, + } } - delegatorIterator := NewTreeIterator(validator.delegators) - for delegatorIterator.Next() { - delegatorStaker := delegatorIterator.Value() - if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { - delegatorIterator.Release() - return err + for _, staker := range validatorDiff.deletedDelegators { + txID := staker.TxID + output[txID] = &stakersData{ + TxBytes: nil, } } - delegatorIterator.Release() } } - - s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) - totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) - if err != nil { - return fmt.Errorf("failed to get total weight of primary network validators: %w", err) - } - s.metrics.SetTotalStake(totalWeight) - return nil + return output, nil } -func (s *state) write(updateValidators bool, height uint64) error { - return utils.Err( - s.writeBlocks(), - s.writeCurrentStakers(updateValidators, height), - s.writePendingStakers(), - s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList), // Must be called after writeCurrentStakers - s.writeTXs(), - s.writeRewardUTXOs(), - s.writeUTXOs(), - s.writeSubnets(), - s.writeSubnetOwners(), - 
s.writeTransformedSubnets(), - s.writeSubnetSupplies(), - s.writeChains(), - s.writeMetadata(), +func (s *state) writeMerkleState(currentData, pendingData map[ids.ID]*stakersData) error { + batchOps := make([]database.BatchOp, 0) + err := utils.Err( + s.writeMetadata(&batchOps), + s.writePermissionedSubnets(&batchOps), + s.writeSubnetOwners(&batchOps), + s.writeElasticSubnets(&batchOps), + s.writeChains(&batchOps), + s.writeCurrentStakers(&batchOps, currentData), + s.writePendingStakers(&batchOps, pendingData), + s.writeDelegateeRewards(&batchOps), + s.writeUTXOs(&batchOps), ) -} - -func (s *state) Close() error { - return utils.Err( - s.pendingSubnetValidatorBaseDB.Close(), - s.pendingSubnetDelegatorBaseDB.Close(), - s.pendingDelegatorBaseDB.Close(), - s.pendingValidatorBaseDB.Close(), - s.pendingValidatorsDB.Close(), - s.currentSubnetValidatorBaseDB.Close(), - s.currentSubnetDelegatorBaseDB.Close(), - s.currentDelegatorBaseDB.Close(), - s.currentValidatorBaseDB.Close(), - s.currentValidatorsDB.Close(), - s.validatorsDB.Close(), - s.txDB.Close(), - s.rewardUTXODB.Close(), - s.utxoDB.Close(), - s.subnetBaseDB.Close(), - s.transformedSubnetDB.Close(), - s.supplyDB.Close(), - s.chainDB.Close(), - s.singletonDB.Close(), - s.blockDB.Close(), - s.blockIDDB.Close(), - ) -} - -func (s *state) sync(genesis []byte) error { - shouldInit, err := s.shouldInit() - if err != nil { - return fmt.Errorf( - "failed to check if the database is initialized: %w", - err, - ) - } - - // If the database is empty, create the platform chain anew using the - // provided genesis state - if shouldInit { - if err := s.init(genesis); err != nil { - return fmt.Errorf( - "failed to initialize the database: %w", - err, - ) - } - } - - if err := s.load(); err != nil { - return fmt.Errorf( - "failed to load the database state: %w", - err, - ) - } - return nil -} - -func (s *state) init(genesisBytes []byte) error { - // Create the genesis block and save it as being accepted (We don't do - // genesisBlock.Accept() because then it'd look for genesisBlock's - // non-existent parent) - genesisID := hashing.ComputeHash256Array(genesisBytes) - genesisBlock, err := block.NewApricotCommitBlock(genesisID, 0 /*height*/) - if err != nil { - return err - } - - genesis, err := genesis.Parse(genesisBytes) - if err != nil { - return err - } - if err := s.syncGenesis(genesisBlock, genesis); err != nil { - return err - } - - if err := s.doneInit(); err != nil { - return err - } - - return s.Commit() -} - -func (s *state) AddStatelessBlock(block block.Block) { - blkID := block.ID() - s.addedBlockIDs[block.Height()] = blkID - s.addedBlocks[blkID] = block -} - -func (s *state) SetHeight(height uint64) { - if s.indexedHeights == nil { - // If indexedHeights hasn't been created yet, then we are newly tracking - // the range. This means we should initialize the LowerBound to the - // current height. - s.indexedHeights = &heightRange{ - LowerBound: height, - } - } - - s.indexedHeights.UpperBound = height - s.currentHeight = height -} - -func (s *state) Commit() error { - defer s.Abort() - batch, err := s.CommitBatch() if err != nil { return err } - return batch.Write() -} - -func (s *state) Abort() { - s.baseDB.Abort() -} - -func (s *state) Checksum() ids.ID { - return s.utxoState.Checksum() -} - -func (s *state) CommitBatch() (database.Batch, error) { - // updateValidators is set to true here so that the validator manager is - // kept up to date with the last accepted state. 
- if err := s.write(true /*=updateValidators*/, s.currentHeight); err != nil { - return nil, err - } - return s.baseDB.CommitBatch() -} - -func (s *state) writeBlocks() error { - for blkID, blk := range s.addedBlocks { - blkID := blkID - blkBytes := blk.Bytes() - blkHeight := blk.Height() - heightKey := database.PackUInt64(blkHeight) - delete(s.addedBlockIDs, blkHeight) - s.blockIDCache.Put(blkHeight, blkID) - if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil { - return fmt.Errorf("failed to add blockID: %w", err) - } - - delete(s.addedBlocks, blkID) - // Note: Evict is used rather than Put here because blk may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. - s.blockCache.Evict(blkID) - if err := s.blockDB.Put(blkID[:], blkBytes); err != nil { - return fmt.Errorf("failed to write block %s: %w", blkID, err) - } - } - return nil -} - -func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) { - if blk, exists := s.addedBlocks[blockID]; exists { - return blk, nil - } - if blk, cached := s.blockCache.Get(blockID); cached { - if blk == nil { - return nil, database.ErrNotFound - } - - return blk, nil + if len(batchOps) == 0 { + // nothing to commit + return nil } - blkBytes, err := s.blockDB.Get(blockID[:]) - if err == database.ErrNotFound { - s.blockCache.Put(blockID, nil) - return nil, database.ErrNotFound - } + view, err := s.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{BatchOps: batchOps}) if err != nil { - return nil, err + return fmt.Errorf("failed creating merkleDB view: %w", err) } - - blk, status, _, err := parseStoredBlock(blkBytes) - if err != nil { - return nil, err + if err := view.CommitToDB(context.TODO()); err != nil { + return fmt.Errorf("failed committing merkleDB view: %w", err) } - - if status != choices.Accepted { - s.blockCache.Put(blockID, nil) - return nil, database.ErrNotFound - } - - s.blockCache.Put(blockID, blk) - return blk, nil + return s.logMerkleRoot(len(batchOps) != 0) } -func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { - if blkID, exists := s.addedBlockIDs[height]; exists { - return blkID, nil - } - if blkID, cached := s.blockIDCache.Get(height); cached { - if blkID == ids.Empty { - return ids.Empty, database.ErrNotFound +func (s *state) writeMetadata(batchOps *[]database.BatchOp) error { + if !s.chainTime.Equal(s.latestComittedChainTime) { + encodedChainTime, err := s.chainTime.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to encoding chainTime: %w", err) } - return blkID, nil + *batchOps = append(*batchOps, database.BatchOp{ + Key: merkleChainTimeKey, + Value: encodedChainTime, + }) + s.latestComittedChainTime = s.chainTime } - heightKey := database.PackUInt64(height) - - blkID, err := database.GetID(s.blockIDDB, heightKey) - if err == database.ErrNotFound { - s.blockIDCache.Put(height, ids.Empty) - return ids.Empty, database.ErrNotFound - } - if err != nil { - return ids.Empty, err + if s.lastAcceptedBlkID != s.latestCommittedLastAcceptedBlkID { + *batchOps = append(*batchOps, database.BatchOp{ + Key: merkleLastAcceptedBlkIDKey, + Value: s.lastAcceptedBlkID[:], + }) + s.latestCommittedLastAcceptedBlkID = s.lastAcceptedBlkID } - s.blockIDCache.Put(height, blkID) - return blkID, nil -} - -func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error { - heightBytes := database.PackUInt64(height) - rawNestedPublicKeyDiffDB := prefixdb.New(heightBytes, 
s.nestedValidatorPublicKeyDiffsDB) - nestedPKDiffDB := linkeddb.NewDefault(rawNestedPublicKeyDiffDB) - - for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { - delete(s.currentStakers.validatorDiffs, subnetID) - - // Select db to write to - validatorDB := s.currentSubnetValidatorList - delegatorDB := s.currentSubnetDelegatorList - if subnetID == constants.PrimaryNetworkID { - validatorDB = s.currentValidatorList - delegatorDB = s.currentDelegatorList - } - - prefixStruct := heightWithSubnet{ - Height: height, - SubnetID: subnetID, - } - prefixBytes, err := block.GenesisCodec.Marshal(block.Version, prefixStruct) - if err != nil { - return fmt.Errorf("failed to create prefix bytes: %w", err) - } - rawNestedWeightDiffDB := prefixdb.New(prefixBytes, s.nestedValidatorWeightDiffsDB) - nestedWeightDiffDB := linkeddb.NewDefault(rawNestedWeightDiffDB) - - // Record the change in weight and/or public key for each validator. - for nodeID, validatorDiff := range validatorDiffs { - // Copy [nodeID] so it doesn't get overwritten next iteration. - nodeID := nodeID - - weightDiff := &ValidatorWeightDiff{ - Decrease: validatorDiff.validatorStatus == deleted, - } - switch validatorDiff.validatorStatus { - case added: - staker := validatorDiff.validator - weightDiff.Amount = staker.Weight - - // Invariant: Only the Primary Network contains non-nil public - // keys. - if staker.PublicKey != nil { - // Record that the public key for the validator is being - // added. This means the prior value for the public key was - // nil. - err := s.flatValidatorPublicKeyDiffsDB.Put( - marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), - nil, - ) - if err != nil { - return err - } - } + // lastAcceptedBlockHeight not persisted yet in merkleDB state. + // TODO: Consider if it should be - // The validator is being added. - // - // Invariant: It's impossible for a delegator to have been - // rewarded in the same block that the validator was added. - metadata := &validatorMetadata{ - txID: staker.TxID, - lastUpdated: staker.StartTime, - - UpDuration: 0, - LastUpdated: uint64(staker.StartTime.Unix()), - PotentialReward: staker.PotentialReward, - PotentialDelegateeReward: 0, - } - - metadataBytes, err := metadataCodec.Marshal(v0, metadata) - if err != nil { - return fmt.Errorf("failed to serialize current validator: %w", err) - } - - if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { - return fmt.Errorf("failed to write current validator to list: %w", err) - } - - s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) - case deleted: - staker := validatorDiff.validator - weightDiff.Amount = staker.Weight - - // Invariant: Only the Primary Network contains non-nil public - // keys. - if staker.PublicKey != nil { - // Record that the public key for the validator is being - // removed. This means we must record the prior value of the - // public key. - // - // Note: We store the uncompressed public key here as it is - // significantly more efficient to parse when applying - // diffs. - err := s.flatValidatorPublicKeyDiffsDB.Put( - marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), - bls.SerializePublicKey(staker.PublicKey), - ) - if err != nil { - return err - } - - // TODO: Remove this once we no longer support version - // rollbacks. - // - // Note: We store the compressed public key here. 
- pkBytes := bls.PublicKeyToBytes(staker.PublicKey) - if err := nestedPKDiffDB.Put(nodeID.Bytes(), pkBytes); err != nil { - return err - } - } - - if err := validatorDB.Delete(staker.TxID[:]); err != nil { - return fmt.Errorf("failed to delete current staker: %w", err) - } - - s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) - } - - err := writeCurrentDelegatorDiff( - delegatorDB, - weightDiff, - validatorDiff, - ) - if err != nil { - return err - } - - if weightDiff.Amount == 0 { - // No weight change to record; go to next validator. - continue - } + for subnetID, supply := range s.modifiedSupplies { + supply := supply + delete(s.modifiedSupplies, subnetID) // clear up s.supplies to avoid potential double commits + s.suppliesCache.Put(subnetID, &supply)
- err = s.flatValidatorWeightDiffsDB.Put( - marshalDiffKey(subnetID, height, nodeID), - marshalWeightDiff(weightDiff), - ) - if err != nil { - return err - } + key := merkleSuppliesKey(subnetID) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: database.PackUInt64(supply), + }) + } + return nil +}
- // TODO: Remove this once we no longer support version rollbacks. - weightDiffBytes, err := block.GenesisCodec.Marshal(block.Version, weightDiff) - if err != nil { - return fmt.Errorf("failed to serialize validator weight diff: %w", err) - } - if err := nestedWeightDiffDB.Put(nodeID.Bytes(), weightDiffBytes); err != nil { - return err - } +func (s *state) writePermissionedSubnets(batchOps *[]database.BatchOp) error { //nolint:golint,unparam + for _, subnetTx := range s.addedPermissionedSubnets { + key := merklePermissionedSubnetKey(subnetTx.ID()) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: subnetTx.Bytes(), + }) + } + s.addedPermissionedSubnets = make([]*txs.Tx, 0) + return nil +}
- // TODO: Move the validator set management out of the state package - if !updateValidators { - continue - } +func (s *state) writeSubnetOwners(batchOps *[]database.BatchOp) error { + for subnetID, owner := range s.subnetOwners { + owner := owner
- if weightDiff.Decrease { - err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) - } else { - if validatorDiff.validatorStatus == added { - staker := validatorDiff.validator - err = s.validators.AddStaker( - subnetID, - nodeID, - staker.PublicKey, - staker.TxID, - weightDiff.Amount, - ) - } else { - err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) - } - } - if err != nil { - return fmt.Errorf("failed to update validator weight: %w", err) - } + ownerBytes, err := block.GenesisCodec.Marshal(block.Version, &owner) + if err != nil { + return fmt.Errorf("failed to marshal subnet owner: %w", err) } - } - // TODO: Move validator set management out of the state package - // - // Attempt to update the stake metrics - if !updateValidators { - return nil + s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ + owner: owner, + size: len(ownerBytes), + }) + + key := merkleSubnetOwnersKey(subnetID) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: ownerBytes, + }) } + maps.Clear(s.subnetOwners) + return nil +}
- totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) - if err != nil { - return fmt.Errorf("failed to get total weight of primary network: %w", err) +func (s *state) writeElasticSubnets(batchOps *[]database.BatchOp) error { //nolint:golint,unparam + for subnetID, transformSubnetTx := range s.addedElasticSubnets { + key := merkleElasticSubnetKey(subnetID) + *batchOps = append(*batchOps, database.BatchOp{
+ Key: key, + Value: transformSubnetTx.Bytes(), + }) + delete(s.addedElasticSubnets, subnetID) + + // Note: Evict is used rather than Put here because tx may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. + s.elasticSubnetCache.Evict(subnetID) } + return nil +}
- s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) - s.metrics.SetTotalStake(totalWeight) +func (s *state) writeChains(batchOps *[]database.BatchOp) error { //nolint:golint,unparam + for subnetID, chains := range s.addedChains { + for _, chainTx := range chains { + key := merkleChainKey(subnetID, chainTx.ID()) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: chainTx.Bytes(), + }) + } + delete(s.addedChains, subnetID) + } return nil }
-func writeCurrentDelegatorDiff( - currentDelegatorList linkeddb.LinkedDB, - weightDiff *ValidatorWeightDiff, - validatorDiff *diffValidator, -) error { - addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) - defer addedDelegatorIterator.Release() - for addedDelegatorIterator.Next() { - staker := addedDelegatorIterator.Value() +func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ids.ID]*stakersData) error { + for stakerTxID, data := range currentData { + key := merkleCurrentStakersKey(stakerTxID)
- if err := weightDiff.Add(false, staker.Weight); err != nil { - return fmt.Errorf("failed to increase node weight diff: %w", err) + if data.TxBytes == nil { + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Delete: true, + }) + continue }
- metadata := &delegatorMetadata{ - txID: staker.TxID, - PotentialReward: staker.PotentialReward, - } - if err := writeDelegatorMetadata(currentDelegatorList, metadata); err != nil { - return fmt.Errorf("failed to write current delegator to list: %w", err) + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) + if err != nil { + return fmt.Errorf("failed to serialize current stakers data, stakerTxID %v: %w", stakerTxID, err) } + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: dataBytes, + }) } + return nil +}
- for _, staker := range validatorDiff.deletedDelegators { - if err := weightDiff.Add(true, staker.Weight); err != nil { - return fmt.Errorf("failed to decrease node weight diff: %w", err) +func (*state) writePendingStakers(batchOps *[]database.BatchOp, pendingData map[ids.ID]*stakersData) error { + for stakerTxID, data := range pendingData { + key := merklePendingStakersKey(stakerTxID) + + if data.TxBytes == nil { + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Delete: true, + }) + continue }
- if err := currentDelegatorList.Delete(staker.TxID[:]); err != nil { - return fmt.Errorf("failed to delete current staker: %w", err) + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) + if err != nil { + return fmt.Errorf("failed to serialize pending stakers data, stakerTxID %v: %w", stakerTxID, err) } + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: dataBytes, + }) } return nil }
-func (s *state) writePendingStakers() error { - for subnetID, subnetValidatorDiffs := range s.pendingStakers.validatorDiffs { - delete(s.pendingStakers.validatorDiffs, subnetID) +func (s *state) writeUTXOs(batchOps *[]database.BatchOp) error { + for utxoID, utxo := range s.modifiedUTXOs { + delete(s.modifiedUTXOs, utxoID) + key := merkleUtxoIDKey(utxoID) + if utxo == nil { // delete the UTXO + switch utxo,
err := s.GetUTXO(utxoID); err { + case nil: + s.utxoCache.Put(utxoID, nil) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Delete: true, + }) + // store the index + if err := s.writeUTXOsIndex(utxo, false /*insertUtxo*/); err != nil { + return err + } + // go process next utxo + continue - validatorDB := s.pendingSubnetValidatorList - delegatorDB := s.pendingSubnetDelegatorList - if subnetID == constants.PrimaryNetworkID { - validatorDB = s.pendingValidatorList - delegatorDB = s.pendingDelegatorList - } + case database.ErrNotFound: + // trying to delete a non-existing utxo. + continue - for _, validatorDiff := range subnetValidatorDiffs { - err := writePendingDiff( - validatorDB, - delegatorDB, - validatorDiff, - ) - if err != nil { + default: return err } } - } - return nil -} -func writePendingDiff( - pendingValidatorList linkeddb.LinkedDB, - pendingDelegatorList linkeddb.LinkedDB, - validatorDiff *diffValidator, -) error { - switch validatorDiff.validatorStatus { - case added: - err := pendingValidatorList.Put(validatorDiff.validator.TxID[:], nil) + // insert the UTXO + utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) if err != nil { - return fmt.Errorf("failed to add pending validator: %w", err) + return err } - case deleted: - err := pendingValidatorList.Delete(validatorDiff.validator.TxID[:]) - if err != nil { - return fmt.Errorf("failed to delete pending validator: %w", err) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: utxoBytes, + }) + + // store the index + if err := s.writeUTXOsIndex(utxo, true /*insertUtxo*/); err != nil { + return err } } + return nil +} - addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) - defer addedDelegatorIterator.Release() - for addedDelegatorIterator.Next() { - staker := addedDelegatorIterator.Value() +func (s *state) writeDelegateeRewards(batchOps *[]database.BatchOp) error { //nolint:golint,unparam + for nodeID, nodeDelegateeRewards := range s.modifiedDelegateeReward { + nodeDelegateeRewardsList := nodeDelegateeRewards.List() + for _, subnetID := range nodeDelegateeRewardsList { + delegateeReward := s.delegateeRewardCache[nodeID][subnetID] - if err := pendingDelegatorList.Put(staker.TxID[:], nil); err != nil { - return fmt.Errorf("failed to write pending delegator to list: %w", err) + key := merkleDelegateeRewardsKey(nodeID, subnetID) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: database.PackUInt64(delegateeReward), + }) } + delete(s.modifiedDelegateeReward, nodeID) } + return nil +} + +func (s *state) writeBlocks() error { + for blkID, blk := range s.addedBlocks { + var ( + blkID = blkID + blkHeight = blk.Height() + ) + + delete(s.addedBlockIDs, blkHeight) + s.blockIDCache.Put(blkHeight, blkID) + if err := database.PutID(s.blockIDDB, database.PackUInt64(blkHeight), blkID); err != nil { + return fmt.Errorf("failed to write block height index: %w", err) + } + + delete(s.addedBlocks, blkID) + // Note: Evict is used rather than Put here because blk may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. 
+ s.blockCache.Evict(blkID) - for _, staker := range validatorDiff.deletedDelegators { - if err := pendingDelegatorList.Delete(staker.TxID[:]); err != nil { - return fmt.Errorf("failed to delete pending delegator: %w", err) + if err := s.blockDB.Put(blkID[:], blk.Bytes()); err != nil { + return fmt.Errorf("failed to write block %s: %w", blkID, err) } } return nil } -func (s *state) writeTXs() error { +func (s *state) writeTxs() error { for txID, txStatus := range s.addedTxs { txID := txID @@ -2241,335 +1828,185 @@ func (s *state) writeTXs() error { return nil } -func (s *state) writeRewardUTXOs() error { - for txID, utxos := range s.addedRewardUTXOs { - delete(s.addedRewardUTXOs, txID) - s.rewardUTXOsCache.Put(txID, utxos) - rawTxDB := prefixdb.New(txID[:], s.rewardUTXODB) - txDB := linkeddb.NewDefault(rawTxDB) +func (s *state) writeUTXOsIndex(utxo *avax.UTXO, insertUtxo bool) error { + addressable, ok := utxo.Out.(avax.Addressable) + if !ok { + return nil + } + addresses := addressable.Addresses() - for _, utxo := range utxos { - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) - if err != nil { - return fmt.Errorf("failed to serialize reward UTXO: %w", err) + for _, addr := range addresses { + key := merkleUtxoIndexKey(addr, utxo.InputID()) + + if insertUtxo { + if err := s.indexedUTXOsDB.Put(key, nil); err != nil { + return err } - utxoID := utxo.InputID() - if err := txDB.Put(utxoID[:], utxoBytes); err != nil { - return fmt.Errorf("failed to add reward UTXO: %w", err) + } else { + if err := s.indexedUTXOsDB.Delete(key); err != nil { + return err } } } return nil } -func (s *state) writeUTXOs() error { - for utxoID, utxo := range s.modifiedUTXOs { - delete(s.modifiedUTXOs, utxoID) +func (s *state) writeLocalUptimes() error { + for vdrID, updatedSubnets := range s.modifiedLocalUptimes { + for subnetID := range updatedSubnets { + key := merkleLocalUptimesKey(vdrID, subnetID) - if utxo == nil { - if err := s.utxoState.DeleteUTXO(utxoID); err != nil { - return fmt.Errorf("failed to delete UTXO: %w", err) + uptimes := s.localUptimesCache[vdrID][subnetID] + uptimeBytes, err := txs.GenesisCodec.Marshal(txs.Version, uptimes) + if err != nil { + return err } - continue - } - if err := s.utxoState.PutUTXO(utxo); err != nil { - return fmt.Errorf("failed to add UTXO: %w", err) - } - } - return nil -} -func (s *state) writeSubnets() error { - for _, subnet := range s.addedSubnets { - subnetID := subnet.ID() - - if err := s.subnetDB.Put(subnetID[:], nil); err != nil { - return fmt.Errorf("failed to write subnet: %w", err) + if err := s.localUptimesDB.Put(key, uptimeBytes); err != nil { + return fmt.Errorf("failed to add local uptimes: %w", err) + } } + delete(s.modifiedLocalUptimes, vdrID) } - s.addedSubnets = nil return nil } -func (s *state) writeSubnetOwners() error { - for subnetID, owner := range s.subnetOwners { - subnetID := subnetID - owner := owner - delete(s.subnetOwners, subnetID) - - ownerBytes, err := block.GenesisCodec.Marshal(block.Version, &owner) - if err != nil { - return fmt.Errorf("failed to marshal subnet owner: %w", err) +func (s *state) writeWeightDiffs(height uint64, weightDiffs map[weightDiffKey]*ValidatorWeightDiff) error { + for weightKey, weightDiff := range weightDiffs { + if weightDiff.Amount == 0 { + // No weight change to record; go to next validator. 
+ continue } - s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ - owner: owner, - size: len(ownerBytes), - }) - - if err := s.subnetOwnerDB.Put(subnetID[:], ownerBytes); err != nil { - return fmt.Errorf("failed to write subnet owner: %w", err) + key := marshalDiffKey(weightKey.subnetID, height, weightKey.nodeID) + weightDiffBytes := marshalWeightDiff(weightDiff) + if err := s.flatValidatorWeightDiffsDB.Put(key, weightDiffBytes); err != nil { + return fmt.Errorf("failed to add weight diffs: %w", err) } } return nil } -func (s *state) writeTransformedSubnets() error { - for subnetID, tx := range s.transformedSubnets { - txID := tx.ID() - - delete(s.transformedSubnets, subnetID) - // Note: Evict is used rather than Put here because tx may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. - s.transformedSubnetCache.Evict(subnetID) - if err := database.PutID(s.transformedSubnetDB, subnetID[:], txID); err != nil { - return fmt.Errorf("failed to write transformed subnet: %w", err) +func (s *state) writeBlsKeyDiffs(height uint64, blsKeyDiffs map[ids.NodeID]*bls.PublicKey) error { + for nodeID, blsKey := range blsKeyDiffs { + key := marshalDiffKey(constants.PrimaryNetworkID, height, nodeID) + blsKeyBytes := []byte{} + if blsKey != nil { + // Note: We store the uncompressed public key here as it is + // significantly more efficient to parse when applying + // diffs. + blsKeyBytes = blsKey.Serialize() } - } - return nil -} - -func (s *state) writeSubnetSupplies() error { - for subnetID, supply := range s.modifiedSupplies { - supply := supply - delete(s.modifiedSupplies, subnetID) - s.supplyCache.Put(subnetID, &supply) - if err := database.PutUInt64(s.supplyDB, subnetID[:], supply); err != nil { - return fmt.Errorf("failed to write subnet supply: %w", err) + if err := s.flatValidatorPublicKeyDiffsDB.Put(key, blsKeyBytes); err != nil { + return fmt.Errorf("failed to add bls key diffs: %w", err) } } return nil } -func (s *state) writeChains() error { - for subnetID, chains := range s.addedChains { - for _, chain := range chains { - chainDB := s.getChainDB(subnetID) +func (s *state) writeRewardUTXOs() error { + for txID, utxos := range s.addedRewardUTXOs { + delete(s.addedRewardUTXOs, txID) + s.rewardUTXOsCache.Put(txID, utxos) + rawTxDB := prefixdb.New(txID[:], s.rewardUTXOsDB) + txDB := linkeddb.NewDefault(rawTxDB) - chainID := chain.ID() - if err := chainDB.Put(chainID[:], nil); err != nil { - return fmt.Errorf("failed to write chain: %w", err) + for _, utxo := range utxos { + utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + if err != nil { + return fmt.Errorf("failed to serialize reward UTXO: %w", err) + } + utxoID := utxo.InputID() + if err := txDB.Put(utxoID[:], utxoBytes); err != nil { + return fmt.Errorf("failed to add reward UTXO: %w", err) } } - delete(s.addedChains, subnetID) } return nil } -func (s *state) writeMetadata() error { - if !s.persistedTimestamp.Equal(s.timestamp) { - if err := database.PutTimestamp(s.singletonDB, timestampKey, s.timestamp); err != nil { - return fmt.Errorf("failed to write timestamp: %w", err) - } - s.persistedTimestamp = s.timestamp - } - if s.persistedCurrentSupply != s.currentSupply { - if err := database.PutUInt64(s.singletonDB, currentSupplyKey, s.currentSupply); err != nil { - return fmt.Errorf("failed to write current supply: %w", err) - } - s.persistedCurrentSupply = s.currentSupply +func (s *state) updateValidatorSet( + updateValidators bool, + 
valSetDiff map[weightDiffKey]*diffValidator, + weightDiffs map[weightDiffKey]*ValidatorWeightDiff, +) error { + if !updateValidators { + return nil } - if s.persistedLastAccepted != s.lastAccepted { - if err := database.PutID(s.singletonDB, lastAcceptedKey, s.lastAccepted); err != nil { - return fmt.Errorf("failed to write last accepted: %w", err) + + for weightKey, weightDiff := range weightDiffs { + var ( + subnetID = weightKey.subnetID + nodeID = weightKey.nodeID + validatorDiff = valSetDiff[weightKey] + err error + ) + + if weightDiff.Amount == 0 { + // No weight change to record; go to next validator. + continue } - s.persistedLastAccepted = s.lastAccepted - } - if s.indexedHeights != nil { - indexedHeightsBytes, err := block.GenesisCodec.Marshal(block.Version, s.indexedHeights) - if err != nil { - return err + if weightDiff.Decrease { + err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) + } else { + if validatorDiff.validatorStatus == added { + staker := validatorDiff.validator + err = s.validators.AddStaker( + subnetID, + nodeID, + staker.PublicKey, + staker.TxID, + weightDiff.Amount, + ) + } else { + err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + } } - if err := s.singletonDB.Put(heightsIndexedKey, indexedHeightsBytes); err != nil { - return fmt.Errorf("failed to write indexed range: %w", err) + if err != nil { + return fmt.Errorf("failed to update validator weight: %w", err) } } + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + return fmt.Errorf("failed to get total weight: %w", err) + } + s.metrics.SetTotalStake(totalWeight) return nil } -// Returns the block, status of the block, and whether it is a [stateBlk]. -// Invariant: blkBytes is safe to parse with blocks.GenesisCodec -// -// TODO: Remove after v1.11.x is activated -func parseStoredBlock(blkBytes []byte) (block.Block, choices.Status, bool, error) { - // Attempt to parse as blocks.Block - blk, err := block.Parse(block.GenesisCodec, blkBytes) - if err == nil { - return blk, choices.Accepted, false, nil - } - - // Fallback to [stateBlk] - blkState := stateBlk{} - if _, err := block.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil { - return nil, choices.Processing, false, err - } - - blkState.Blk, err = block.Parse(block.GenesisCodec, blkState.Bytes) +func (s *state) logMerkleRoot(hasChanges bool) error { + // get current Height + blk, err := s.GetStatelessBlock(s.GetLastAccepted()) if err != nil { - return nil, choices.Processing, false, err + // may happen in tests. Let's just skip + return nil } - return blkState.Blk, blkState.Status, true, nil -} - -func (s *state) PruneAndIndex(lock sync.Locker, log logging.Logger) error { - lock.Lock() - // It is possible that new blocks are added after grabbing this iterator. New - // blocks are guaranteed to be accepted and height-indexed, so we don't need to - // check them. - blockIterator := s.blockDB.NewIterator() - // Releasing is done using a closure to ensure that updating blockIterator will - // result in having the most recent iterator released when executing the - // deferred function. - defer func() { - blockIterator.Release() - }() - - // While we are pruning the disk, we disable caching of the data we are - // modifying. Caching is re-enabled when pruning finishes. - // - // Note: If an unexpected error occurs the caches are never re-enabled. 
- // That's fine as the node is going to be in an unhealthy state regardless. - oldBlockIDCache := s.blockIDCache - s.blockIDCache = &cache.Empty[uint64, ids.ID]{} - lock.Unlock() - - log.Info("starting state pruning and indexing") - - var ( - startTime = time.Now() - lastCommit = startTime - lastUpdate = startTime - numPruned = 0 - numIndexed = 0 - ) - - for blockIterator.Next() { - blkBytes := blockIterator.Value() - - blk, status, isStateBlk, err := parseStoredBlock(blkBytes) - if err != nil { - return err - } - - if status != choices.Accepted { - // Remove non-accepted blocks from disk. - if err := s.blockDB.Delete(blockIterator.Key()); err != nil { - return fmt.Errorf("failed to delete block: %w", err) - } - - numPruned++ - - // We don't index the height of non-accepted blocks. - continue - } - - blkHeight := blk.Height() - blkID := blk.ID() - - // Populate the map of height -> blockID. - heightKey := database.PackUInt64(blkHeight) - if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil { - return fmt.Errorf("failed to add blockID: %w", err) - } - - // Since we only store accepted blocks on disk, we only need to store a map of - // ids.ID to Block. - if isStateBlk { - if err := s.blockDB.Put(blkID[:], blkBytes); err != nil { - return fmt.Errorf("failed to write block: %w", err) - } - } - - numIndexed++ - - if numIndexed%pruneCommitLimit == 0 { - // We must hold the lock during committing to make sure we don't - // attempt to commit to disk while a block is concurrently being - // accepted. - lock.Lock() - err := utils.Err( - s.Commit(), - blockIterator.Error(), - ) - lock.Unlock() - if err != nil { - return err - } - - // We release the iterator here to allow the underlying database to - // clean up deleted state. - blockIterator.Release() - - now := time.Now() - if now.Sub(lastUpdate) > pruneUpdateFrequency { - lastUpdate = now - - progress := timer.ProgressFromHash(blkID[:]) - eta := timer.EstimateETA( - startTime, - progress, - math.MaxUint64, - ) - - log.Info("committing state pruning and indexing", - zap.Int("numPruned", numPruned), - zap.Int("numIndexed", numIndexed), - zap.Duration("eta", eta), - ) - } - - // We take the minimum here because it's possible that the node is - // currently bootstrapping. This would mean that grabbing the lock - // could take an extremely long period of time; which we should not - // delay processing for. - pruneDuration := now.Sub(lastCommit) - sleepDuration := safemath.Min( - pruneCommitSleepMultiplier*pruneDuration, - pruneCommitSleepCap, - ) - time.Sleep(sleepDuration) - - // Make sure not to include the sleep duration into the next prune - // duration. - lastCommit = time.Now() - - blockIterator = s.blockDB.NewIteratorWithStart(blkID[:]) - } + if !hasChanges { + s.ctx.Log.Info("merkle root", + zap.Uint64("height", blk.Height()), + zap.Stringer("blkID", blk.ID()), + zap.String("merkle root", "no changes to merkle state"), + ) + return nil } - // Ensure we fully iterated over all blocks before writing that pruning has - // finished. - // - // Note: This is needed because a transient read error could cause the - // iterator to stop early. 
- if err := blockIterator.Error(); err != nil { - return err + view, err := s.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{}) + if err != nil { + return fmt.Errorf("failed creating merkleDB view: %w", err) } - - if err := s.donePrune(); err != nil { - return err + root, err := view.GetMerkleRoot(context.TODO()) + if err != nil { + return fmt.Errorf("failed pulling merkle root: %w", err) } - // We must hold the lock during committing to make sure we don't - // attempt to commit to disk while a block is concurrently being - // accepted. - lock.Lock() - defer lock.Unlock() - - // Make sure we flush the original cache before re-enabling it to prevent - // surfacing any stale data. - oldBlockIDCache.Flush() - s.blockIDCache = oldBlockIDCache - - log.Info("finished state pruning and indexing", - zap.Int("numPruned", numPruned), - zap.Int("numIndexed", numIndexed), - zap.Duration("duration", time.Since(startTime)), + s.ctx.Log.Info("merkle root", + zap.Uint64("height", blk.Height()), + zap.Stringer("blkID", blk.ID()), + zap.String("merkle root", root.String()), ) - - return s.Commit() + return nil } diff --git a/vms/platformvm/state/merkle_state_helpers.go b/vms/platformvm/state/state_helpers.go similarity index 100% rename from vms/platformvm/state/merkle_state_helpers.go rename to vms/platformvm/state/state_helpers.go diff --git a/vms/platformvm/state/merkle_state_test.go b/vms/platformvm/state/state_helpers_test.go similarity index 100% rename from vms/platformvm/state/merkle_state_test.go rename to vms/platformvm/state/state_helpers_test.go diff --git a/vms/platformvm/state/merkle_state_load_ops.go b/vms/platformvm/state/state_load_ops.go similarity index 66% rename from vms/platformvm/state/merkle_state_load_ops.go rename to vms/platformvm/state/state_load_ops.go index c9c17a8ddbec..f03f869af910 100644 --- a/vms/platformvm/state/merkle_state_load_ops.go +++ b/vms/platformvm/state/state_load_ops.go @@ -25,8 +25,8 @@ import ( // If [ms] isn't initialized, initializes it with [genesis]. // Then loads [ms] from disk. -func (ms *merkleState) sync(genesis []byte) error { - shouldInit, err := ms.shouldInit() +func (s *state) sync(genesis []byte) error { + shouldInit, err := s.shouldInit() if err != nil { return fmt.Errorf( "failed to check if the database is initialized: %w", @@ -37,7 +37,7 @@ func (ms *merkleState) sync(genesis []byte) error { // If the database is empty, create the platform chain anew using the // provided genesis state if shouldInit { - if err := ms.init(genesis); err != nil { + if err := s.init(genesis); err != nil { return fmt.Errorf( "failed to initialize the database: %w", err, @@ -45,20 +45,20 @@ func (ms *merkleState) sync(genesis []byte) error { } } - return ms.load(shouldInit) + return s.load(shouldInit) } -func (ms *merkleState) shouldInit() (bool, error) { - has, err := ms.singletonDB.Has(initializedKey) +func (s *state) shouldInit() (bool, error) { + has, err := s.singletonDB.Has(initializedKey) return !has, err } -func (ms *merkleState) doneInit() error { - return ms.singletonDB.Put(initializedKey, nil) +func (s *state) doneInit() error { + return s.singletonDB.Put(initializedKey, nil) } // Creates a genesis from [genesisBytes] and initializes [ms] with it. 
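
Review note on the `shouldInit`/`doneInit` pair above: together they gate the whole genesis bootstrap on a single `initialized` marker key in `singletonDB`, which makes `sync` idempotent across restarts. Below is a minimal, self-contained sketch of the same gate using avalanchego's in-memory database; `bootstrapOnce` is an illustrative name, not part of this patch.

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/memdb"
)

var initializedKey = []byte("initialized")

// bootstrapOnce runs initFn only if the marker key is absent, then persists
// the marker so later calls skip initFn entirely.
func bootstrapOnce(db database.Database, initFn func() error) error {
	has, err := db.Has(initializedKey)
	if err != nil {
		return err
	}
	if has {
		return nil // already initialized
	}
	if err := initFn(); err != nil {
		return err
	}
	return db.Put(initializedKey, nil)
}

func main() {
	db := memdb.New()
	runs := 0
	for i := 0; i < 3; i++ {
		if err := bootstrapOnce(db, func() error { runs++; return nil }); err != nil {
			panic(err)
		}
	}
	fmt.Println(runs) // 1: the init function ran exactly once
}
```

In the patch itself the marker and the genesis writes land in the same versiondb `Commit` (see `init` just below), so a crash before commit simply re-runs initialization on the next start rather than leaving a half-initialized database behind.
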
-func (ms *merkleState) init(genesisBytes []byte) error { +func (s *state) init(genesisBytes []byte) error { // Create the genesis block and save it as being accepted (We don't do // genesisBlock.Accept() because then it'd look for genesisBlock's // non-existent parent) @@ -72,28 +72,28 @@ func (ms *merkleState) init(genesisBytes []byte) error { if err != nil { return err } - if err := ms.syncGenesis(genesisBlock, genesisState); err != nil { + if err := s.syncGenesis(genesisBlock, genesisState); err != nil { return err } - if err := ms.doneInit(); err != nil { + if err := s.doneInit(); err != nil { return err } - return ms.Commit() + return s.Commit() } // Loads the state from [genesisBls] and [genesis] into [ms]. -func (ms *merkleState) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error { - ms.SetLastAccepted(genesisBlk.ID()) - ms.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0)) - ms.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply) - ms.AddStatelessBlock(genesisBlk) +func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error { + s.SetLastAccepted(genesisBlk.ID()) + s.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0)) + s.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply) + s.AddStatelessBlock(genesisBlk) // Persist UTXOs that exist at genesis for _, utxo := range genesis.UTXOs { avaxUTXO := utxo.UTXO - ms.AddUTXO(&avaxUTXO) + s.AddUTXO(&avaxUTXO) } // Persist primary network validator set at genesis @@ -105,12 +105,12 @@ func (ms *merkleState) syncGenesis(genesisBlk block.Block, genesis *genesis.Gene stakeAmount := tx.Validator.Wght stakeDuration := tx.Validator.Duration() - currentSupply, err := ms.GetCurrentSupply(constants.PrimaryNetworkID) + currentSupply, err := s.GetCurrentSupply(constants.PrimaryNetworkID) if err != nil { return err } - potentialReward := ms.rewards.Calculate( + potentialReward := s.rewards.Calculate( stakeDuration, stakeAmount, currentSupply, @@ -125,9 +125,9 @@ func (ms *merkleState) syncGenesis(genesisBlk block.Block, genesis *genesis.Gene return err } - ms.PutCurrentValidator(staker) - ms.AddTx(vdrTx, status.Committed) - ms.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply) + s.PutCurrentValidator(staker) + s.AddTx(vdrTx, status.Committed) + s.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply) } for _, chain := range genesis.Chains { @@ -138,37 +138,37 @@ func (ms *merkleState) syncGenesis(genesisBlk block.Block, genesis *genesis.Gene // Ensure all chains that the genesis bytes say to create have the right // network ID - if unsignedChain.NetworkID != ms.ctx.NetworkID { + if unsignedChain.NetworkID != s.ctx.NetworkID { return avax.ErrWrongNetworkID } - ms.AddChain(chain) - ms.AddTx(chain, status.Committed) + s.AddChain(chain) + s.AddTx(chain, status.Committed) } // updateValidators is set to false here to maintain the invariant that the // primary network's validator set is empty before the validator sets are // initialized. - return ms.write(false /*=updateValidators*/, 0) + return s.write(false /*=updateValidators*/, 0) } // Load pulls data previously stored on disk that is expected to be in memory. 
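
Review note on the commit path that `init` above funnels into: every state change ends up in the same merkledb sequence shown earlier in this diff — gather `database.BatchOp`s, open a view over `merkleDB`, commit the view atomically. A sketch of that sequence in isolation, assuming an already-constructed `merkledb.MerkleDB`; the helper `commitAndRoot` is illustrative, not part of this patch.

```go
package state

import (
	"context"
	"fmt"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/x/merkledb"
)

// commitAndRoot applies ops as one atomic merkle view and returns the root
// that now commits to the whole merkleized state.
func commitAndRoot(ctx context.Context, db merkledb.MerkleDB, ops []database.BatchOp) (ids.ID, error) {
	view, err := db.NewView(ctx, merkledb.ViewChanges{BatchOps: ops})
	if err != nil {
		return ids.Empty, fmt.Errorf("failed creating merkleDB view: %w", err)
	}
	// The new root can be read from the view even before it is committed.
	root, err := view.GetMerkleRoot(ctx)
	if err != nil {
		return ids.Empty, fmt.Errorf("failed computing merkle root: %w", err)
	}
	if err := view.CommitToDB(ctx); err != nil {
		return ids.Empty, fmt.Errorf("failed committing merkleDB view: %w", err)
	}
	return root, nil
}
```

Deletions ride the same path as writes: `database.BatchOp{Key: key, Delete: true}` is exactly how `writeCurrentStakers` earlier in this diff drops a staker entry from the trie.
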
-func (ms *merkleState) load(hasSynced bool) error { +func (s *state) load(hasSynced bool) error { return utils.Err( - ms.loadMerkleMetadata(), - ms.loadCurrentStakers(), - ms.loadPendingStakers(), - ms.initValidatorSets(), + s.loadMerkleMetadata(), + s.loadCurrentStakers(), + s.loadPendingStakers(), + s.initValidatorSets(), - ms.logMerkleRoot(!hasSynced), // we already logged if sync has happened + s.logMerkleRoot(!hasSynced), // we already logged if sync has happened ) } // Loads the chain time and last accepted block ID from disk // and populates them in [ms]. -func (ms *merkleState) loadMerkleMetadata() error { +func (s *state) loadMerkleMetadata() error { // load chain time - chainTimeBytes, err := ms.merkleDB.Get(merkleChainTimeKey) + chainTimeBytes, err := s.merkleDB.Get(merkleChainTimeKey) if err != nil { return err } @@ -176,18 +176,18 @@ func (ms *merkleState) loadMerkleMetadata() error { if err := chainTime.UnmarshalBinary(chainTimeBytes); err != nil { return err } - ms.latestComittedChainTime = chainTime - ms.SetTimestamp(chainTime) + s.latestComittedChainTime = chainTime + s.SetTimestamp(chainTime) // load last accepted block - blkIDBytes, err := ms.merkleDB.Get(merkleLastAcceptedBlkIDKey) + blkIDBytes, err := s.merkleDB.Get(merkleLastAcceptedBlkIDKey) if err != nil { return err } lastAcceptedBlkID := ids.Empty copy(lastAcceptedBlkID[:], blkIDBytes) - ms.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID - ms.SetLastAccepted(lastAcceptedBlkID) + s.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID + s.SetLastAccepted(lastAcceptedBlkID) // We don't need to load supplies. Unlike chain time and last block ID, // which have the persisted* attribute, we signify that a supply hasn't @@ -196,14 +196,14 @@ func (ms *merkleState) loadMerkleMetadata() error { } // Loads current stakes from disk and populates them in [ms]. -func (ms *merkleState) loadCurrentStakers() error { +func (s *state) loadCurrentStakers() error { // TODO ABENEGIA: Check missing metadata - ms.currentStakers = newBaseStakers() + s.currentStakers = newBaseStakers() prefix := make([]byte, len(currentStakersSectionPrefix)) copy(prefix, currentStakersSectionPrefix) - iter := ms.merkleDB.NewIteratorWithPrefix(prefix) + iter := s.merkleDB.NewIteratorWithPrefix(prefix) defer iter.Release() for iter.Next() { data := &stakersData{} @@ -226,29 +226,29 @@ func (ms *merkleState) loadCurrentStakers() error { } if staker.Priority.IsValidator() { // TODO: why not PutValidator/PutDelegator?? 
- validator := ms.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) + validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) validator.validator = staker - ms.currentStakers.stakers.ReplaceOrInsert(staker) + s.currentStakers.stakers.ReplaceOrInsert(staker) } else { - validator := ms.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) + validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) if validator.delegators == nil { validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validator.delegators.ReplaceOrInsert(staker) - ms.currentStakers.stakers.ReplaceOrInsert(staker) + s.currentStakers.stakers.ReplaceOrInsert(staker) } } return iter.Error() } -func (ms *merkleState) loadPendingStakers() error { +func (s *state) loadPendingStakers() error { // TODO ABENEGIA: Check missing metadata - ms.pendingStakers = newBaseStakers() + s.pendingStakers = newBaseStakers() prefix := make([]byte, len(pendingStakersSectionPrefix)) copy(prefix, pendingStakersSectionPrefix) - iter := ms.merkleDB.NewIteratorWithPrefix(prefix) + iter := s.merkleDB.NewIteratorWithPrefix(prefix) defer iter.Release() for iter.Next() { data := &stakersData{} @@ -270,16 +270,16 @@ func (ms *merkleState) loadPendingStakers() error { return err } if staker.Priority.IsValidator() { - validator := ms.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) + validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) validator.validator = staker - ms.pendingStakers.stakers.ReplaceOrInsert(staker) + s.pendingStakers.stakers.ReplaceOrInsert(staker) } else { - validator := ms.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) + validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) if validator.delegators == nil { validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validator.delegators.ReplaceOrInsert(staker) - ms.pendingStakers.stakers.ReplaceOrInsert(staker) + s.pendingStakers.stakers.ReplaceOrInsert(staker) } } return iter.Error() @@ -287,23 +287,23 @@ func (ms *merkleState) loadPendingStakers() error { // Invariant: initValidatorSets requires loadCurrentValidators to have already // been called. -func (ms *merkleState) initValidatorSets() error { - for subnetID, validators := range ms.currentStakers.validators { - if ms.validators.Count(subnetID) != 0 { +func (s *state) initValidatorSets() error { + for subnetID, validators := range s.currentStakers.validators { + if s.validators.Count(subnetID) != 0 { // Enforce the invariant that the validator set is empty here. 
return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) } for nodeID, validator := range validators { validatorStaker := validator.validator - if err := ms.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { return err } delegatorIterator := NewTreeIterator(validator.delegators) for delegatorIterator.Next() { delegatorStaker := delegatorIterator.Value() - if err := ms.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { + if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { delegatorIterator.Release() return err } @@ -312,11 +312,11 @@ func (ms *merkleState) initValidatorSets() error { } } - ms.metrics.SetLocalStake(ms.validators.GetWeight(constants.PrimaryNetworkID, ms.ctx.NodeID)) - totalWeight, err := ms.validators.TotalWeight(constants.PrimaryNetworkID) + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("failed to get total weight of primary network validators: %w", err) } - ms.metrics.SetTotalStake(totalWeight) + s.metrics.SetTotalStake(totalWeight) return nil } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 5d24c2364a83..88e5897ba4d4 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -52,16 +52,16 @@ func TestStateInitialization(t *testing.T) { require := require.New(t) s, db := newUninitializedState(require) - shouldInit, err := s.(*merkleState).shouldInit() + shouldInit, err := s.(*state).shouldInit() require.NoError(err) require.True(shouldInit) - require.NoError(s.(*merkleState).doneInit()) + require.NoError(s.(*state).doneInit()) require.NoError(s.Commit()) s = newStateFromDB(require, db) - shouldInit, err = s.(*merkleState).shouldInit() + shouldInit, err = s.(*state).shouldInit() require.NoError(err) require.False(shouldInit) } @@ -153,7 +153,7 @@ func newInitializedState(require *require.Assertions) (State, database.Database) genesisBlk, err := block.NewApricotCommitBlock(genesisBlkID, 0) require.NoError(err) - require.NoError(s.(*merkleState).syncGenesis(genesisBlk, genesisState)) + require.NoError(s.(*state).syncGenesis(genesisBlk, genesisState)) return s, db } @@ -165,7 +165,7 @@ func newUninitializedState(require *require.Assertions) (State, database.Databas func newStateFromDB(require *require.Assertions, db database.Database) State { execCfg, _ := config.GetExecutionConfig(nil) - state, err := newMerkleState( + state, err := newState( db, metrics.Noop, validators.NewManager(), @@ -671,12 +671,12 @@ func TestParsedStateBlock(t *testing.T) { stBlkBytes, err := block.GenesisCodec.Marshal(block.Version, &stBlk) require.NoError(err) - gotBlk, _, isStateBlk, err := parseStoredBlock(stBlkBytes) + gotBlk, isStateBlk, err := parseStoredBlock(stBlkBytes) require.NoError(err) require.True(isStateBlk) require.Equal(blk.ID(), gotBlk.ID()) - gotBlk, _, isStateBlk, err = parseStoredBlock(blk.Bytes()) + gotBlk, isStateBlk, err = parseStoredBlock(blk.Bytes()) require.NoError(err) require.False(isStateBlk) require.Equal(blk.ID(), gotBlk.ID()) @@ -719,3 +719,34 @@ func TestStateSubnetOwner(t *testing.T) { require.NoError(err) require.Equal(owner2, owner) } + +// Returns the 
block and whether it is a [stateBlk]. +// Invariant: blkBytes is safe to parse with blocks.GenesisCodec +// +// TODO: Remove after v1.11.x is activated +type stateBlk struct { + Blk block.Block + Bytes []byte `serialize:"true"` + Status choices.Status `serialize:"true"` +} + +func parseStoredBlock(blkBytes []byte) (block.Block, bool, error) { + // Attempt to parse as blocks.Block + blk, err := block.Parse(block.GenesisCodec, blkBytes) + if err == nil { + return blk, false, nil + } + + // Fallback to [stateBlk] + blkState := stateBlk{} + if _, err := block.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil { + return nil, false, err + } + + blkState.Blk, err = block.Parse(block.GenesisCodec, blkState.Bytes) + if err != nil { + return nil, false, err + } + + return blkState.Blk, true, nil +} diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index 26536dbe5336..df3150e04bdd 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -225,7 +225,7 @@ func defaultState( ) state.State { genesisBytes := buildGenesisTest(ctx) execCfg, _ := config.GetExecutionConfig(nil) - state, err := state.NewMerkleState( + state, err := state.New( db, genesisBytes, prometheus.NewRegistry(), diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 3d4334968f4c..c312e4044e8b 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -135,7 +135,7 @@ func (vm *VM) Initialize( rewards := reward.NewCalculator(vm.RewardConfig) - vm.state, err = state.NewMerkleState( + vm.state, err = state.New( vm.db, genesisBytes, registerer, diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 5bf8ef9a0bfc..81582f1b14d2 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -644,7 +644,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Force a reload of the state from the database. vm.Config.Validators = validators.NewManager() execCfg, _ := config.GetExecutionConfig(nil) - newState, err := state.NewMerkleState( + newState, err := state.New( vm.db, nil, prometheus.NewRegistry(), @@ -951,7 +951,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Force a reload of the state from the database.
vm.Config.Validators = validators.NewManager() execCfg, _ := config.GetExecutionConfig(nil) - newState, err := state.NewMerkleState( + newState, err := state.New( vm.db, nil, prometheus.NewRegistry(), From e9788286e03f23d9bdbb8419780a9826b4d79039 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 22 Nov 2023 23:30:59 +0100 Subject: [PATCH 105/132] nits --- vms/platformvm/state/state.go | 22 ------------------- vms/platformvm/vm.go | 41 +++-------------------------------- 2 files changed, 3 insertions(+), 60 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 8b2c4066ddd2..5100ef315eee 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -8,7 +8,6 @@ import ( "context" "errors" "fmt" - "sync" "time" "github.com/prometheus/client_golang/prometheus" @@ -30,7 +29,6 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -145,18 +143,6 @@ type State interface { // Discard uncommitted changes to the database. Abort() - // Returns if the state should be pruned and indexed to remove rejected - // blocks and generate the block height index. - // - // TODO: Remove after v1.11.x is activated - ShouldPrune() (bool, error) - - // Removes rejected blocks from disk and indexes accepted blocks by height. This - // function supports being (and is recommended to be) called asynchronously. - // - // TODO: Remove after v1.11.x is activated - PruneAndIndex(sync.Locker, logging.Logger) error - // Commit changes to the base database. Commit() error @@ -1056,14 +1042,6 @@ func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { return blkID, nil } -func (*state) ShouldPrune() (bool, error) { - return false, nil // Nothing to do -} - -func (*state) PruneAndIndex(sync.Locker, logging.Logger) error { - return nil // Nothing to do -} - // UPTIMES SECTION func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) { nodeUptimes, exists := s.localUptimesCache[vdrID] diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index c312e4044e8b..b4423841959c 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -87,9 +87,6 @@ type VM struct { txBuilder txbuilder.Builder manager blockexecutor.Manager - - // TODO: Remove after v1.11.x is activated - pruned utils.Atomic[bool] } // Initialize this blockchain. @@ -218,35 +215,7 @@ func (vm *VM) Initialize( chainCtx.Log.Info("initializing last accepted", zap.Stringer("blkID", lastAcceptedID), ) - if err := vm.SetPreference(ctx, lastAcceptedID); err != nil { - return err - } - - shouldPrune, err := vm.state.ShouldPrune() - if err != nil { - return fmt.Errorf( - "failed to check if the database should be pruned: %w", - err, - ) - } - if !shouldPrune { - chainCtx.Log.Info("state already pruned and indexed") - vm.pruned.Set(true) - return nil - } - - go func() { - err := vm.state.PruneAndIndex(&vm.ctx.Lock, vm.ctx.Log) - if err != nil { - vm.ctx.Log.Error("state pruning and height indexing failed", - zap.Error(err), - ) - } - - vm.pruned.Set(true) - }() - - return nil + return vm.SetPreference(ctx, lastAcceptedID) } // Create all chains that exist that this node validates. 
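
Review note on the two hunks around this point: `ShouldPrune`/`PruneAndIndex` existed to back-fill a height index for blocks accepted before the index was introduced. The rewritten `writeBlocks` earlier in this diff indexes every accepted block synchronously at commit time, so the index is complete by construction and `VerifyHeightIndex` can unconditionally return nil (as the next hunk shows). A small self-contained sketch of the write/read pair, using only the avalanchego database helpers that `writeBlocks` and `GetBlockIDAtHeight` already rely on:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/avalanchego/ids"
)

func main() {
	blockIDDB := memdb.New()

	// Write side: every accepted block gets a height -> blockID entry
	// at commit time, keyed by the packed big-endian height.
	blkID := ids.GenerateTestID()
	height := uint64(42)
	if err := database.PutID(blockIDDB, database.PackUInt64(height), blkID); err != nil {
		panic(err)
	}

	// Read side: since only accepted blocks are ever written, a lookup
	// either finds the accepted block's ID or database.ErrNotFound.
	gotID, err := database.GetID(blockIDDB, database.PackUInt64(height))
	if err != nil {
		panic(err)
	}
	fmt.Println(gotID == blkID) // true
}
```
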
@@ -472,12 +441,8 @@ func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } -func (vm *VM) VerifyHeightIndex(_ context.Context) error { - if vm.pruned.Get() { - return nil - } - - return snowmanblock.ErrIndexIncomplete +func (*VM) VerifyHeightIndex(_ context.Context) error { + return nil } func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { From 4663aa45f4c55a2a9b360da44504b9d14714afba Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 23 Nov 2023 00:07:59 +0100 Subject: [PATCH 106/132] leftover nit --- vms/platformvm/state/mock_state.go | 31 ------------------------------ 1 file changed, 31 deletions(-) diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 41ce946a12e0..e7eae9198799 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -10,13 +10,11 @@ package state import ( context "context" reflect "reflect" - sync "sync" time "time" database "github.com/ava-labs/avalanchego/database" ids "github.com/ava-labs/avalanchego/ids" validators "github.com/ava-labs/avalanchego/snow/validators" - logging "github.com/ava-labs/avalanchego/utils/logging" avax "github.com/ava-labs/avalanchego/vms/components/avax" block "github.com/ava-labs/avalanchego/vms/platformvm/block" fx "github.com/ava-labs/avalanchego/vms/platformvm/fx" @@ -604,20 +602,6 @@ func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 interface{}) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0, arg1) } -// PruneAndIndex mocks base method. -func (m *MockState) PruneAndIndex(arg0 sync.Locker, arg1 logging.Logger) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PruneAndIndex", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// PruneAndIndex indicates an expected call of PruneAndIndex. -func (mr *MockStateMockRecorder) PruneAndIndex(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneAndIndex", reflect.TypeOf((*MockState)(nil).PruneAndIndex), arg0, arg1) -} - // PutCurrentDelegator mocks base method. func (m *MockState) PutCurrentDelegator(arg0 *Staker) { m.ctrl.T.Helper() @@ -754,21 +738,6 @@ func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2, arg3 interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2, arg3) } -// ShouldPrune mocks base method. -func (m *MockState) ShouldPrune() (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ShouldPrune") - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ShouldPrune indicates an expected call of ShouldPrune. -func (mr *MockStateMockRecorder) ShouldPrune() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldPrune", reflect.TypeOf((*MockState)(nil).ShouldPrune)) -} - // UTXOIDs mocks base method. 
func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error) { m.ctrl.T.Helper() From 11371c9a266eaf3a177e9b9501fbe21f12cee805 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Fri, 24 Nov 2023 15:37:03 +0100 Subject: [PATCH 107/132] nit --- vms/platformvm/state/state.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 5100ef315eee..c5d33bf4e19b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -438,8 +438,8 @@ type state struct { currentStakers *baseStakers pendingStakers *baseStakers - delegateeRewardCache map[ids.NodeID]map[ids.ID]uint64 - modifiedDelegateeReward map[ids.NodeID]set.Set[ids.ID] + delegateeRewardCache map[ids.NodeID]map[ids.ID]uint64 // (nodeID, subnetID) --> delegatee amount + modifiedDelegateeReward map[ids.NodeID]set.Set[ids.ID] // tracks (nodeID, subnetID) pairs updated after last commit // UTXOs section modifiedUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO From cc7111e693bae62f56297f913fc2d280ac089e33 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 28 Nov 2023 11:00:04 -0500 Subject: [PATCH 108/132] fix imports; move var declaration --- vms/platformvm/state/state.go | 72 ++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 35 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index c5d33bf4e19b..d08623045929 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -11,7 +11,9 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + "golang.org/x/exp/maps" "golang.org/x/exp/slices" @@ -53,6 +55,41 @@ const ( utxoCacheSize = 8192 // from avax/utxo_state.go ) +var ( + _ State = (*state)(nil) + + errValidatorSetAlreadyPopulated = errors.New("validator set already populated") + errIsNotSubnet = errors.New("is not a subnet") + + merkleStatePrefix = []byte{0x00} + merkleSingletonPrefix = []byte{0x01} + merkleBlockPrefix = []byte{0x02} + merkleBlockIDsPrefix = []byte{0x03} + merkleTxPrefix = []byte{0x04} + merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) + merkleUptimesPrefix = []byte{0x06} // locally measured uptimes + merkleWeightDiffPrefix = []byte{0x07} // non-merkleized validators weight diff. TODO: should we merkleize them? + merkleBlsKeyDiffPrefix = []byte{0x08} + merkleRewardUtxosPrefix = []byte{0x09} + + initializedKey = []byte("initialized") + + // merkle db sections + metadataSectionPrefix = byte(0x00) + merkleChainTimeKey = []byte{metadataSectionPrefix, 0x00} + merkleLastAcceptedBlkIDKey = []byte{metadataSectionPrefix, 0x01} + merkleSuppliesPrefix = []byte{metadataSectionPrefix, 0x02} + + permissionedSubnetSectionPrefix = []byte{0x01} + elasticSubnetSectionPrefix = []byte{0x02} + chainsSectionPrefix = []byte{0x03} + utxosSectionPrefix = []byte{0x04} + currentStakersSectionPrefix = []byte{0x05} + pendingStakersSectionPrefix = []byte{0x06} + delegateeRewardsPrefix = []byte{0x07} + subnetOwnersPrefix = []byte{0x08} +) + // Chain collects all methods to manage the state of the chain for block // execution. 
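
Review note on the var block just added above: each merkleized section of the trie lives under a one-byte prefix (metadata keys take a second byte), and section keys are built by joining a prefix with the entity's ID. The key helpers live in state_helpers.go, which this series renames but does not show, so the following is only a plausible in-package sketch of one of them, assuming plain concatenation. (The `Chain` interface introduced by the comment above continues below.)

```go
package state

import "github.com/ava-labs/avalanchego/ids"

// merkleSuppliesKey namespaces a subnet's current-supply entry under the
// metadata section: the two-byte merkleSuppliesPrefix declared above,
// followed by the raw subnetID bytes. Illustrative sketch only; the real
// helper lives in state_helpers.go.
func merkleSuppliesKey(subnetID ids.ID) []byte {
	key := make([]byte, 0, len(merkleSuppliesPrefix)+ids.IDLen)
	key = append(key, merkleSuppliesPrefix...)
	return append(key, subnetID[:]...)
}
```
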
type Chain interface { @@ -155,41 +192,6 @@ type State interface { Close() error } -var ( - _ State = (*state)(nil) - - errValidatorSetAlreadyPopulated = errors.New("validator set already populated") - errIsNotSubnet = errors.New("is not a subnet") - - merkleStatePrefix = []byte{0x00} - merkleSingletonPrefix = []byte{0x01} - merkleBlockPrefix = []byte{0x02} - merkleBlockIDsPrefix = []byte{0x03} - merkleTxPrefix = []byte{0x04} - merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) - merkleUptimesPrefix = []byte{0x06} // locally measured uptimes - merkleWeightDiffPrefix = []byte{0x07} // non-merkleized validators weight diff. TODO: should we merkleize them? - merkleBlsKeyDiffPrefix = []byte{0x08} - merkleRewardUtxosPrefix = []byte{0x09} - - initializedKey = []byte("initialized") - - // merkle db sections - metadataSectionPrefix = byte(0x00) - merkleChainTimeKey = []byte{metadataSectionPrefix, 0x00} - merkleLastAcceptedBlkIDKey = []byte{metadataSectionPrefix, 0x01} - merkleSuppliesPrefix = []byte{metadataSectionPrefix, 0x02} - - permissionedSubnetSectionPrefix = []byte{0x01} - elasticSubnetSectionPrefix = []byte{0x02} - chainsSectionPrefix = []byte{0x03} - utxosSectionPrefix = []byte{0x04} - currentStakersSectionPrefix = []byte{0x05} - pendingStakersSectionPrefix = []byte{0x06} - delegateeRewardsPrefix = []byte{0x07} - subnetOwnersPrefix = []byte{0x08} -) - func New( rawDB database.Database, genesisBytes []byte, From 1161e110e7517dd9531dc8ed81f56da0c9287ede Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 29 Nov 2023 09:27:29 -0500 Subject: [PATCH 109/132] [WIP] P-Chain merkle DB -- smaller diff (#2382) --- vms/platformvm/state/diff.go | 7 +- vms/platformvm/state/masked_iterator_test.go | 10 +- vms/platformvm/state/staker_test.go | 2 +- vms/platformvm/state/state.go | 1835 ++++++++++------- vms/platformvm/state/state_load_ops.go | 324 --- vms/platformvm/state/state_test.go | 8 - .../txs/executor/staker_tx_verification.go | 2 +- vms/platformvm/vm_regression_test.go | 1 + 8 files changed, 1079 insertions(+), 1110 deletions(-) delete mode 100644 vms/platformvm/state/state_load_ops.go diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index f0a90e66170b..1aafcf079969 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -331,9 +331,12 @@ func (d *diff) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { func (d *diff) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { transformSubnetTx := transformSubnetTxIntf.Unsigned.(*txs.TransformSubnetTx) if d.transformedSubnets == nil { - d.transformedSubnets = make(map[ids.ID]*txs.Tx) + d.transformedSubnets = map[ids.ID]*txs.Tx{ + transformSubnetTx.Subnet: transformSubnetTxIntf, + } + } else { + d.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf } - d.transformedSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf } func (d *diff) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { diff --git a/vms/platformvm/state/masked_iterator_test.go b/vms/platformvm/state/masked_iterator_test.go index a3c43818d3b4..8ba719d3e732 100644 --- a/vms/platformvm/state/masked_iterator_test.go +++ b/vms/platformvm/state/masked_iterator_test.go @@ -17,23 +17,19 @@ func TestMaskedIterator(t *testing.T) { stakers := []*Staker{ { TxID: ids.GenerateTestID(), - Weight: 0, // just to simplify debugging NextTime: time.Unix(0, 0), }, { TxID: ids.GenerateTestID(), - Weight: 10, // just to simplify debugging - NextTime: time.Unix(10, 0), + NextTime: time.Unix(1, 0), }, { TxID: 
ids.GenerateTestID(), - Weight: 20, // just to simplify debugging - NextTime: time.Unix(20, 0), + NextTime: time.Unix(2, 0), }, { TxID: ids.GenerateTestID(), - Weight: 30, // just to simplify debugging - NextTime: time.Unix(30, 0), + NextTime: time.Unix(3, 0), }, } maskedStakers := map[ids.ID]*Staker{ diff --git a/vms/platformvm/state/staker_test.go b/vms/platformvm/state/staker_test.go index a6faa4ab704f..747f442e5eda 100644 --- a/vms/platformvm/state/staker_test.go +++ b/vms/platformvm/state/staker_test.go @@ -144,7 +144,7 @@ func TestNewCurrentStaker(t *testing.T) { subnetID := ids.GenerateTestID() weight := uint64(12345) startTime := time.Now() - endTime := startTime.Add(time.Hour) + endTime := time.Now() potentialReward := uint64(54321) currentPriority := txs.SubnetPermissionedValidatorCurrentPriority diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index d08623045929..c1cc51bc5413 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -10,6 +10,7 @@ import ( "fmt" "time" + "github.com/google/btree" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" @@ -25,12 +26,14 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -38,6 +41,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" + "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -192,8 +196,173 @@ type State interface { Close() error } +// TODO: Remove after v1.11.x is activated +type stateBlk struct { + Blk block.Block + Bytes []byte `serialize:"true"` + Status choices.Status `serialize:"true"` +} + +// Stores global state in a merkle trie. This means that each state corresponds +// to a unique merkle root. Specifically, the following state is merkleized. +// - Delegatee Rewards +// - UTXOs +// - Current Supply +// - Subnet Creation Transactions +// - Subnet Owners +// - Subnet Transformation Transactions +// - Chain Creation Transactions +// - Chain time +// - Last Accepted Block ID +// - Current Staker Set +// - Pending Staker Set +// +// Changing any of the above state will cause the merkle root to change. 
+//
+// The following state is not merkleized:
+// - Database Initialization Status
+// - Blocks
+// - Block IDs
+// - Transactions (note some transactions are also stored merkleized)
+// - Uptimes
+// - Weight Diffs
+// - BLS Key Diffs
+// - Reward UTXOs
+type state struct {
+	validators validators.Manager
+	ctx        *snow.Context
+	metrics    metrics.Metrics
+	rewards    reward.Calculator
+
+	baseDB       *versiondb.Database
+	singletonDB  database.Database
+	baseMerkleDB database.Database
+	merkleDB     merkledb.MerkleDB // Stores merkleized state
+
+	// stakers section (missing Delegatee piece)
+	// TODO: Consider moving delegatee to UTXOs section
+	currentStakers *baseStakers
+	pendingStakers *baseStakers
+
+	delegateeRewardCache    map[ids.NodeID]map[ids.ID]uint64 // (nodeID, subnetID) --> delegatee amount
+	modifiedDelegateeReward map[ids.NodeID]set.Set[ids.ID]   // tracks (nodeID, subnetID) pairs updated after last commit
+
+	// UTXOs section
+	modifiedUTXOs map[ids.ID]*avax.UTXO            // map of UTXO ID -> *UTXO
+	utxoCache     cache.Cacher[ids.ID, *avax.UTXO] // UTXO ID -> *UTXO. If the *UTXO is nil the UTXO doesn't exist
+
+	// Metadata section
+	chainTime, latestComittedChainTime                  time.Time
+	lastAcceptedBlkID, latestCommittedLastAcceptedBlkID ids.ID
+	lastAcceptedHeight                                  uint64                        // TODO: Should this be written to state??
+	modifiedSupplies                                    map[ids.ID]uint64             // map of subnetID -> current supply
+	suppliesCache                                       cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database
+
+	// Subnets section
+	// Subnet ID --> Owner of the subnet
+	subnetOwners     map[ids.ID]fx.Owner
+	subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database
+
+	addedPermissionedSubnets []*txs.Tx                     // added SubnetTxs, waiting to be committed
+	permissionedSubnetCache  []*txs.Tx                     // nil if the subnets haven't been loaded
+	addedElasticSubnets      map[ids.ID]*txs.Tx            // map of subnetID -> transformSubnetTx
+	elasticSubnetCache       cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database
+
+	// Chains section
+	addedChains map[ids.ID][]*txs.Tx            // maps subnetID -> the newly added chains to the subnet
+	chainCache  cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx
+
+	// Blocks section
+	// Note: addedBlocks is a map because multiple blocks can be committed at once (proposal + accepted option)
+	addedBlocks map[ids.ID]block.Block            // map of blockID -> Block.
+	blockCache  cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database
+	blockDB     database.Database
+
+	addedBlockIDs map[uint64]ids.ID            // map of height -> blockID
+	blockIDCache  cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database
+	blockIDDB     database.Database
+
+	// Txs section
+	// FIND a way to reduce use of these. Not used in verification of addedTxs;
+	// just a limited window to support APIs
+	addedTxs map[ids.ID]*txAndStatus            // map of txID -> {*txs.Tx, Status}
+	txCache  cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}.
If the entry is nil, it isn't in the database + txDB database.Database + + indexedUTXOsDB database.Database + + localUptimesCache map[ids.NodeID]map[ids.ID]*uptimes // vdrID -> subnetID -> metadata + modifiedLocalUptimes map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs + localUptimesDB database.Database + + flatValidatorWeightDiffsDB database.Database + flatValidatorPublicKeyDiffsDB database.Database + + // Reward UTXOs section + addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO + rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO + rewardUTXOsDB database.Database +} + +type ValidatorWeightDiff struct { + Decrease bool `serialize:"true"` + Amount uint64 `serialize:"true"` +} + +func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { + if v.Decrease == negative { + var err error + v.Amount, err = safemath.Add64(v.Amount, amount) + return err + } + + if v.Amount > amount { + v.Amount -= amount + } else { + v.Amount = safemath.AbsDiff(v.Amount, amount) + v.Decrease = negative + } + return nil +} + +type txBytesAndStatus struct { + Tx []byte `serialize:"true"` + Status status.Status `serialize:"true"` +} + +type txAndStatus struct { + tx *txs.Tx + status status.Status +} + +type fxOwnerAndSize struct { + owner fx.Owner + size int +} + +func txSize(_ ids.ID, tx *txs.Tx) int { + if tx == nil { + return ids.IDLen + constants.PointerOverhead + } + return ids.IDLen + len(tx.Bytes()) + constants.PointerOverhead +} + +func txAndStatusSize(_ ids.ID, t *txAndStatus) int { + if t == nil { + return ids.IDLen + constants.PointerOverhead + } + return ids.IDLen + len(t.tx.Bytes()) + wrappers.IntLen + 2*constants.PointerOverhead +} + +func blockSize(_ ids.ID, blk block.Block) int { + if blk == nil { + return ids.IDLen + constants.PointerOverhead + } + return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead +} + func New( - rawDB database.Database, + db database.Database, genesisBytes []byte, metricsReg prometheus.Registerer, validators validators.Manager, @@ -202,8 +371,8 @@ func New( metrics metrics.Metrics, rewards reward.Calculator, ) (State, error) { - res, err := newState( - rawDB, + s, err := newState( + db, metrics, validators, execCfg, @@ -215,17 +384,17 @@ func New( return nil, err } - if err := res.sync(genesisBytes); err != nil { + if err := s.sync(genesisBytes); err != nil { // Drop any errors on close to return the first error - _ = res.Close() + _ = s.Close() return nil, err } - return res, nil + return s, nil } func newState( - rawDB database.Database, + db database.Database, metrics metrics.Metrics, validators validators.Manager, execCfg *config.ExecutionConfig, @@ -234,7 +403,7 @@ func newState( rewards reward.Calculator, ) (*state, error) { var ( - baseDB = versiondb.New(rawDB) + baseDB = versiondb.New(db) baseMerkleDB = prefixdb.New(merkleStatePrefix, baseDB) singletonDB = prefixdb.New(merkleSingletonPrefix, baseDB) blockDB = prefixdb.New(merkleBlockPrefix, baseDB) @@ -264,19 +433,19 @@ func newState( return nil, fmt.Errorf("failed creating merkleDB: %w", err) } - rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( - "reward_utxos_cache", + txCache, err := metercacher.New( + "tx_cache", metricsReg, - &cache.LRU[ids.ID, []*avax.UTXO]{Size: execCfg.RewardUTXOsCacheSize}, + cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize), ) if err != nil { return nil, err } - suppliesCache, err := metercacher.New[ids.ID, *uint64]( - "supply_cache", + rewardUTXOsCache, err := metercacher.New[ids.ID, 
[]*avax.UTXO]( + "reward_utxos_cache", metricsReg, - &cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize}, + &cache.LRU[ids.ID, []*avax.UTXO]{Size: execCfg.RewardUTXOsCacheSize}, ) if err != nil { return nil, err @@ -302,6 +471,15 @@ func newState( return nil, err } + supplyCache, err := metercacher.New[ids.ID, *uint64]( + "supply_cache", + metricsReg, + &cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize}, + ) + if err != nil { + return nil, err + } + chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( "chain_cache", metricsReg, @@ -329,15 +507,6 @@ func newState( return nil, err } - txCache, err := metercacher.New( - "tx_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize), - ) - if err != nil { - return nil, err - } - return &state{ validators: validators, ctx: ctx, @@ -359,7 +528,7 @@ func newState( utxoCache: &cache.LRU[ids.ID, *avax.UTXO]{Size: utxoCacheSize}, modifiedSupplies: make(map[ids.ID]uint64), - suppliesCache: suppliesCache, + suppliesCache: supplyCache, subnetOwners: make(map[ids.ID]fx.Owner), subnetOwnerCache: subnetOwnerCache, @@ -399,135 +568,33 @@ func newState( }, nil } -// Stores global state in a merkle trie. This means that each state corresponds -// to a unique merkle root. Specifically, the following state is merkleized. -// - Delegatee Rewards -// - UTXOs -// - Current Supply -// - Subnet Creation Transactions -// - Subnet Owners -// - Subnet Transformation Transactions -// - Chain Creation Transactions -// - Chain time -// - Last Accepted Block ID -// - Current Staker Set -// - Pending Staker Set -// -// Changing any of the above state will cause the merkle root to change. -// -// The following state is not merkleized: -// - Database Initialization Status -// - Blocks -// - Block IDs -// - Transactions (note some transactions are also stored merkleized) -// - Uptimes -// - Weight Diffs -// - BLS Key Diffs -// - Reward UTXOs -type state struct { - validators validators.Manager - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator +func (s *state) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { + return s.currentStakers.GetValidator(subnetID, nodeID) +} - baseDB *versiondb.Database - singletonDB database.Database - baseMerkleDB database.Database - merkleDB merkledb.MerkleDB // Stores merkleized state +func (s *state) PutCurrentValidator(staker *Staker) { + s.currentStakers.PutValidator(staker) - // stakers section (missing Delegatee piece) - // TODO: Consider moving delegatee to UTXOs section - currentStakers *baseStakers - pendingStakers *baseStakers + // make sure that each new validator has an uptime entry + // and a delegatee reward entry. MerkleState implementations + // of SetUptime and SetDelegateeReward must not err + err := s.SetUptime(staker.NodeID, staker.SubnetID, 0 /*duration*/, staker.StartTime) + if err != nil { + panic(err) + } + err = s.SetDelegateeReward(staker.SubnetID, staker.NodeID, 0) + if err != nil { + panic(err) + } +} - delegateeRewardCache map[ids.NodeID]map[ids.ID]uint64 // (nodeID, subnetID) --> delegatee amount - modifiedDelegateeReward map[ids.NodeID]set.Set[ids.ID] // tracks (nodeID, subnetID) pairs updated after last commit +func (s *state) DeleteCurrentValidator(staker *Staker) { + s.currentStakers.DeleteValidator(staker) +} - // UTXOs section - modifiedUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO - utxoCache cache.Cacher[ids.ID, *avax.UTXO] // UTXO ID -> *UTXO. 
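The ValidatorWeightDiff.Add method above folds a sequence of weight changes into one signed delta, flipping Decrease whenever the opposing amount dominates. A minimal standalone check of that arithmetic (the type is restated here so the snippet compiles on its own; the real method reports overflow via safemath.Add64):

	package main

	import "fmt"

	// Restated from the patch; overflow handling elided for brevity.
	type ValidatorWeightDiff struct {
		Decrease bool
		Amount   uint64
	}

	func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) {
		if v.Decrease == negative {
			v.Amount += amount // real code: safemath.Add64
			return
		}
		if v.Amount > amount {
			v.Amount -= amount
		} else {
			// |v.Amount - amount|, taking the sign of the larger side
			v.Amount = amount - v.Amount
			v.Decrease = negative
		}
	}

	func main() {
		v := &ValidatorWeightDiff{}
		v.Add(false, 10) // weight +10
		v.Add(true, 25)  // weight -25; net is -15
		fmt.Println(v.Decrease, v.Amount) // prints: true 15
	}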
If the *UTXO is nil the UTXO doesn't exist - - // Metadata section - chainTime, latestComittedChainTime time.Time - lastAcceptedBlkID, latestCommittedLastAcceptedBlkID ids.ID - lastAcceptedHeight uint64 // TODO: Should this be written to state?? - modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply - suppliesCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database - - // Subnets section - // Subnet ID --> Owner of the subnet - subnetOwners map[ids.ID]fx.Owner - subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database - - addedPermissionedSubnets []*txs.Tx // added SubnetTxs, waiting to be committed - permissionedSubnetCache []*txs.Tx // nil if the subnets haven't been loaded - addedElasticSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx - elasticSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database - - // Chains section - addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet - chainCache cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx - - // Blocks section - // Note: addedBlocks is a list because multiple blocks can be committed at one (proposal + accepted option) - addedBlocks map[ids.ID]block.Block // map of blockID -> Block. - blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database - blockDB database.Database - - addedBlockIDs map[uint64]ids.ID // map of height -> blockID - blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database - blockIDDB database.Database - - // Txs section - // FIND a way to reduce use of these. No use in verification of addedTxs - // a limited windows to support APIs - addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} - txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. If the entry is nil, it isn't in the database - txDB database.Database - - indexedUTXOsDB database.Database - - localUptimesCache map[ids.NodeID]map[ids.ID]*uptimes // vdrID -> subnetID -> metadata - modifiedLocalUptimes map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs - localUptimesDB database.Database - - flatValidatorWeightDiffsDB database.Database - flatValidatorPublicKeyDiffsDB database.Database - - // Reward UTXOs section - addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO - rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO - rewardUTXOsDB database.Database -} - -// STAKERS section -func (s *state) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { - return s.currentStakers.GetValidator(subnetID, nodeID) -} - -func (s *state) PutCurrentValidator(staker *Staker) { - s.currentStakers.PutValidator(staker) - - // make sure that each new validator has an uptime entry - // and a delegatee reward entry. 
MerkleState implementations - // of SetUptime and SetDelegateeReward must not err - err := s.SetUptime(staker.NodeID, staker.SubnetID, 0 /*duration*/, staker.StartTime) - if err != nil { - panic(err) - } - err = s.SetDelegateeReward(staker.SubnetID, staker.NodeID, 0) - if err != nil { - panic(err) - } -} - -func (s *state) DeleteCurrentValidator(staker *Staker) { - s.currentStakers.DeleteValidator(staker) -} - -func (s *state) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { - return s.currentStakers.GetDelegatorIterator(subnetID, nodeID), nil -} +func (s *state) GetCurrentDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) (StakerIterator, error) { + return s.currentStakers.GetDelegatorIterator(subnetID, nodeID), nil +} func (s *state) PutCurrentDelegator(staker *Staker) { s.currentStakers.PutDelegator(staker) @@ -569,181 +636,13 @@ func (s *state) GetPendingStakerIterator() (StakerIterator, error) { return s.pendingStakers.GetStakerIterator(), nil } -func (s *state) GetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID) (uint64, error) { - nodeDelegateeRewards, exists := s.delegateeRewardCache[vdrID] - if exists { - delegateeReward, exists := nodeDelegateeRewards[subnetID] - if exists { - return delegateeReward, nil - } - } - - // try loading from the db - key := merkleDelegateeRewardsKey(vdrID, subnetID) - amountBytes, err := s.merkleDB.Get(key) - if err != nil { - return 0, err - } - delegateeReward, err := database.ParseUInt64(amountBytes) - if err != nil { - return 0, err - } - - if _, found := s.delegateeRewardCache[vdrID]; !found { - s.delegateeRewardCache[vdrID] = make(map[ids.ID]uint64) - } - s.delegateeRewardCache[vdrID][subnetID] = delegateeReward - return delegateeReward, nil -} - -func (s *state) SetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID, amount uint64) error { - nodeDelegateeRewards, exists := s.delegateeRewardCache[vdrID] - if !exists { - nodeDelegateeRewards = make(map[ids.ID]uint64) - s.delegateeRewardCache[vdrID] = nodeDelegateeRewards - } - nodeDelegateeRewards[subnetID] = amount - - // track diff - updatedDelegateeRewards, ok := s.modifiedDelegateeReward[vdrID] - if !ok { - updatedDelegateeRewards = set.Set[ids.ID]{} - s.modifiedDelegateeReward[vdrID] = updatedDelegateeRewards - } - updatedDelegateeRewards.Add(subnetID) - return nil -} - -// UTXOs section -func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { - if utxo, exists := s.modifiedUTXOs[utxoID]; exists { - if utxo == nil { - return nil, database.ErrNotFound - } - return utxo, nil - } - if utxo, found := s.utxoCache.Get(utxoID); found { - if utxo == nil { - return nil, database.ErrNotFound - } - return utxo, nil - } - - key := merkleUtxoIDKey(utxoID) - - switch bytes, err := s.merkleDB.Get(key); err { - case nil: - utxo := &avax.UTXO{} - if _, err := txs.GenesisCodec.Unmarshal(bytes, utxo); err != nil { - return nil, err - } - s.utxoCache.Put(utxoID, utxo) - return utxo, nil - - case database.ErrNotFound: - s.utxoCache.Put(utxoID, nil) - return nil, database.ErrNotFound - - default: - return nil, err - } -} - -func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { - var ( - prefix = slices.Clone(addr) - key = merkleUtxoIndexKey(addr, start) - ) - - iter := s.indexedUTXOsDB.NewIteratorWithStartAndPrefix(key, prefix) - defer iter.Release() - - utxoIDs := []ids.ID(nil) - for len(utxoIDs) < limit && iter.Next() { - itAddr, utxoID := splitUtxoIndexKey(iter.Key()) - if !bytes.Equal(itAddr, addr) { - break - } - if utxoID == start { 
- continue - } - - start = ids.Empty - utxoIDs = append(utxoIDs, utxoID) - } - return utxoIDs, iter.Error() -} - -func (s *state) AddUTXO(utxo *avax.UTXO) { - s.modifiedUTXOs[utxo.InputID()] = utxo -} - -func (s *state) DeleteUTXO(utxoID ids.ID) { - s.modifiedUTXOs[utxoID] = nil -} - -// METADATA Section -func (s *state) GetTimestamp() time.Time { - return s.chainTime -} - -func (s *state) SetTimestamp(tm time.Time) { - s.chainTime = tm -} - -func (s *state) GetLastAccepted() ids.ID { - return s.lastAcceptedBlkID -} - -func (s *state) SetLastAccepted(lastAccepted ids.ID) { - s.lastAcceptedBlkID = lastAccepted -} - -func (s *state) SetHeight(height uint64) { - s.lastAcceptedHeight = height -} - -func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { - supply, ok := s.modifiedSupplies[subnetID] - if ok { - return supply, nil - } - cachedSupply, ok := s.suppliesCache.Get(subnetID) - if ok { - if cachedSupply == nil { - return 0, database.ErrNotFound - } - return *cachedSupply, nil - } - - key := merkleSuppliesKey(subnetID) - - switch supplyBytes, err := s.merkleDB.Get(key); err { - case nil: - supply, err := database.ParseUInt64(supplyBytes) - if err != nil { - return 0, fmt.Errorf("failed parsing supply: %w", err) - } - s.suppliesCache.Put(subnetID, &supply) - return supply, nil - - case database.ErrNotFound: - s.suppliesCache.Put(subnetID, nil) - return 0, database.ErrNotFound - - default: - return 0, err - } -} - -func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { - s.modifiedSupplies[subnetID] = cs +func (s *state) shouldInit() (bool, error) { + has, err := s.singletonDB.Has(initializedKey) + return !has, err } -// SUBNETS Section -type fxOwnerAndSize struct { - owner fx.Owner - size int +func (s *state) doneInit() error { + return s.singletonDB.Put(initializedKey, nil) } func (s *state) GetSubnets() ([]*txs.Tx, error) { @@ -753,10 +652,10 @@ func (s *state) GetSubnets() ([]*txs.Tx, error) { return s.permissionedSubnetCache, nil } - subnets := make([]*txs.Tx, 0) subnetDBIt := s.merkleDB.NewIteratorWithPrefix(permissionedSubnetSectionPrefix) defer subnetDBIt.Release() + subnets := make([]*txs.Tx, 0) for subnetDBIt.Next() { subnetTxBytes := subnetDBIt.Value() subnetTx, err := txs.Parse(txs.GenesisCodec, subnetTxBytes) @@ -864,17 +763,17 @@ func (s *state) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { s.addedElasticSubnets[transformSubnetTx.Subnet] = transformSubnetTxIntf } -// CHAINS Section func (s *state) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { if chains, cached := s.chainCache.Get(subnetID); cached { return chains, nil } - chains := make([]*txs.Tx, 0) prefix := merkleChainPrefix(subnetID) - chainDBIt := s.merkleDB.NewIteratorWithPrefix(prefix) defer chainDBIt.Release() + + chains := make([]*txs.Tx, 0) + for chainDBIt.Next() { chainTxBytes := chainDBIt.Value() chainTx, err := txs.Parse(txs.GenesisCodec, chainTxBytes) @@ -898,31 +797,6 @@ func (s *state) AddChain(createChainTxIntf *txs.Tx) { s.addedChains[subnetID] = append(s.addedChains[subnetID], createChainTxIntf) } -// TXs Section -type txBytesAndStatus struct { - Tx []byte `serialize:"true"` - Status status.Status `serialize:"true"` -} - -type txAndStatus struct { - tx *txs.Tx - status status.Status -} - -func txSize(_ ids.ID, tx *txs.Tx) int { - if tx == nil { - return ids.IDLen + constants.PointerOverhead - } - return ids.IDLen + len(tx.Bytes()) + constants.PointerOverhead -} - -func txAndStatusSize(_ ids.ID, t *txAndStatus) int { - if t == nil { - return ids.IDLen + 
constants.PointerOverhead - } - return ids.IDLen + len(t.tx.Bytes()) + wrappers.IntLen + 2*constants.PointerOverhead -} - func (s *state) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { if tx, exists := s.addedTxs[txID]; exists { return tx.tx, tx.status, nil @@ -971,41 +845,66 @@ func (s *state) AddTx(tx *txs.Tx, status status.Status) { } } -// BLOCKs Section -func blockSize(_ ids.ID, blk block.Block) int { - if blk == nil { - return ids.IDLen + constants.PointerOverhead +func (s *state) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { + if utxos, exists := s.addedRewardUTXOs[txID]; exists { + return utxos, nil + } + if utxos, exists := s.rewardUTXOsCache.Get(txID); exists { + return utxos, nil } - return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead -} -func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) { - if blk, exists := s.addedBlocks[blockID]; exists { - return blk, nil + rawTxDB := prefixdb.New(txID[:], s.rewardUTXOsDB) + txDB := linkeddb.NewDefault(rawTxDB) + it := txDB.NewIterator() + defer it.Release() + + utxos := []*avax.UTXO(nil) + for it.Next() { + utxo := &avax.UTXO{} + if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil { + return nil, err + } + utxos = append(utxos, utxo) + } + if err := it.Error(); err != nil { + return nil, err } - if blk, cached := s.blockCache.Get(blockID); cached { - if blk == nil { + s.rewardUTXOsCache.Put(txID, utxos) + return utxos, nil +} + +func (s *state) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { + s.addedRewardUTXOs[txID] = append(s.addedRewardUTXOs[txID], utxo) +} + +func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { + if utxo, exists := s.modifiedUTXOs[utxoID]; exists { + if utxo == nil { return nil, database.ErrNotFound } - - return blk, nil + return utxo, nil + } + if utxo, found := s.utxoCache.Get(utxoID); found { + if utxo == nil { + return nil, database.ErrNotFound + } + return utxo, nil } - blkBytes, err := s.blockDB.Get(blockID[:]) - switch err { + key := merkleUtxoIDKey(utxoID) + + switch bytes, err := s.merkleDB.Get(key); err { case nil: - // Note: stored blocks are verified, so it's safe to unmarshal them with GenesisCodec - blk, err := block.Parse(block.GenesisCodec, blkBytes) - if err != nil { + utxo := &avax.UTXO{} + if _, err := txs.GenesisCodec.Unmarshal(bytes, utxo); err != nil { return nil, err } - - s.blockCache.Put(blockID, blk) - return blk, nil + s.utxoCache.Put(utxoID, utxo) + return utxo, nil case database.ErrNotFound: - s.blockCache.Put(blockID, nil) + s.utxoCache.Put(utxoID, nil) return nil, database.ErrNotFound default: @@ -1013,90 +912,37 @@ func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) { } } -func (s *state) AddStatelessBlock(block block.Block) { - s.addedBlocks[block.ID()] = block -} - -func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { - if blkID, exists := s.addedBlockIDs[height]; exists { - return blkID, nil - } - if blkID, cached := s.blockIDCache.Get(height); cached { - if blkID == ids.Empty { - return ids.Empty, database.ErrNotFound - } - - return blkID, nil - } - - heightKey := database.PackUInt64(height) - - blkID, err := database.GetID(s.blockIDDB, heightKey) - if err == database.ErrNotFound { - s.blockIDCache.Put(height, ids.Empty) - return ids.Empty, database.ErrNotFound - } - if err != nil { - return ids.Empty, err - } +func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { + var ( + prefix = slices.Clone(addr) + key = merkleUtxoIndexKey(addr, start) + ) - 
s.blockIDCache.Put(height, blkID) - return blkID, nil -} + iter := s.indexedUTXOsDB.NewIteratorWithStartAndPrefix(key, prefix) + defer iter.Release() -// UPTIMES SECTION -func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) { - nodeUptimes, exists := s.localUptimesCache[vdrID] - if exists { - uptime, exists := nodeUptimes[subnetID] - if exists { - return uptime.Duration, uptime.lastUpdated, nil + utxoIDs := []ids.ID(nil) + for len(utxoIDs) < limit && iter.Next() { + itAddr, utxoID := splitUtxoIndexKey(iter.Key()) + if !bytes.Equal(itAddr, addr) { + break } - } - - // try loading from DB - key := merkleLocalUptimesKey(vdrID, subnetID) - uptimeBytes, err := s.localUptimesDB.Get(key) - switch err { - case nil: - upTm := &uptimes{} - if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, upTm); err != nil { - return 0, time.Time{}, err + if utxoID == start { + continue } - upTm.lastUpdated = time.Unix(int64(upTm.LastUpdated), 0) - s.localUptimesCache[vdrID] = make(map[ids.ID]*uptimes) - s.localUptimesCache[vdrID][subnetID] = upTm - return upTm.Duration, upTm.lastUpdated, nil - case database.ErrNotFound: - // no local data for this staker uptime - return 0, time.Time{}, database.ErrNotFound - default: - return 0, time.Time{}, err + start = ids.Empty + utxoIDs = append(utxoIDs, utxoID) } + return utxoIDs, iter.Error() } -func (s *state) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { - nodeUptimes, exists := s.localUptimesCache[vdrID] - if !exists { - nodeUptimes = make(map[ids.ID]*uptimes) - s.localUptimesCache[vdrID] = nodeUptimes - } - - nodeUptimes[subnetID] = &uptimes{ - Duration: upDuration, - LastUpdated: uint64(lastUpdated.Unix()), - lastUpdated: lastUpdated, - } +func (s *state) AddUTXO(utxo *avax.UTXO) { + s.modifiedUTXOs[utxo.InputID()] = utxo +} - // track diff - updatedNodeUptimes, ok := s.modifiedLocalUptimes[vdrID] - if !ok { - updatedNodeUptimes = set.Set[ids.ID]{} - s.modifiedLocalUptimes[vdrID] = updatedNodeUptimes - } - updatedNodeUptimes.Add(subnetID) - return nil +func (s *state) DeleteUTXO(utxoID ids.ID) { + s.modifiedUTXOs[utxoID] = nil } func (s *state) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { @@ -1107,97 +953,57 @@ func (s *state) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, err return staker.StartTime, nil } -// REWARD UTXOs SECTION -func (s *state) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { - if utxos, exists := s.addedRewardUTXOs[txID]; exists { - return utxos, nil - } - if utxos, exists := s.rewardUTXOsCache.Get(txID); exists { - return utxos, nil - } - - rawTxDB := prefixdb.New(txID[:], s.rewardUTXOsDB) - txDB := linkeddb.NewDefault(rawTxDB) - it := txDB.NewIterator() - defer it.Release() - - utxos := []*avax.UTXO(nil) - for it.Next() { - utxo := &avax.UTXO{} - if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil { - return nil, err - } - utxos = append(utxos, utxo) - } - if err := it.Error(); err != nil { - return nil, err - } +func (s *state) GetTimestamp() time.Time { + return s.chainTime +} - s.rewardUTXOsCache.Put(txID, utxos) - return utxos, nil +func (s *state) SetTimestamp(tm time.Time) { + s.chainTime = tm } -func (s *state) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { - s.addedRewardUTXOs[txID] = append(s.addedRewardUTXOs[txID], utxo) +func (s *state) GetLastAccepted() ids.ID { + return s.lastAcceptedBlkID } -// VALIDATORS Section -type ValidatorWeightDiff struct { - 
Decrease bool `serialize:"true"` - Amount uint64 `serialize:"true"` +func (s *state) SetLastAccepted(lastAccepted ids.ID) { + s.lastAcceptedBlkID = lastAccepted } -func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { - if v.Decrease == negative { - var err error - v.Amount, err = safemath.Add64(v.Amount, amount) - return err +func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { + supply, ok := s.modifiedSupplies[subnetID] + if ok { + return supply, nil } - - if v.Amount > amount { - v.Amount -= amount - } else { - v.Amount = safemath.AbsDiff(v.Amount, amount) - v.Decrease = negative + cachedSupply, ok := s.suppliesCache.Get(subnetID) + if ok { + if cachedSupply == nil { + return 0, database.ErrNotFound + } + return *cachedSupply, nil } - return nil -} -func applyWeightDiff( - vdrs map[ids.NodeID]*validators.GetValidatorOutput, - nodeID ids.NodeID, - weightDiff *ValidatorWeightDiff, -) error { - vdr, ok := vdrs[nodeID] - if !ok { - // This node isn't in the current validator set. - vdr = &validators.GetValidatorOutput{ - NodeID: nodeID, + key := merkleSuppliesKey(subnetID) + + switch supplyBytes, err := s.merkleDB.Get(key); err { + case nil: + supply, err := database.ParseUInt64(supplyBytes) + if err != nil { + return 0, fmt.Errorf("failed parsing supply: %w", err) } - vdrs[nodeID] = vdr - } + s.suppliesCache.Put(subnetID, &supply) + return supply, nil - // The weight of this node changed at this block. - var err error - if weightDiff.Decrease { - // The validator's weight was decreased at this block, so in the - // prior block it was higher. - vdr.Weight, err = safemath.Add64(vdr.Weight, weightDiff.Amount) - } else { - // The validator's weight was increased at this block, so in the - // prior block it was lower. - vdr.Weight, err = safemath.Sub(vdr.Weight, weightDiff.Amount) - } - if err != nil { - return err - } + case database.ErrNotFound: + s.suppliesCache.Put(subnetID, nil) + return 0, database.ErrNotFound - if vdr.Weight == 0 { - // The validator's weight was 0 before this block so they weren't in the - // validator set. - delete(vdrs, nodeID) + default: + return 0, err } - return nil +} + +func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { + s.modifiedSupplies[subnetID] = cs } func (s *state) ApplyValidatorWeightDiffs( @@ -1241,6 +1047,43 @@ func (s *state) ApplyValidatorWeightDiffs( return diffIter.Error() } +func applyWeightDiff( + vdrs map[ids.NodeID]*validators.GetValidatorOutput, + nodeID ids.NodeID, + weightDiff *ValidatorWeightDiff, +) error { + vdr, ok := vdrs[nodeID] + if !ok { + // This node isn't in the current validator set. + vdr = &validators.GetValidatorOutput{ + NodeID: nodeID, + } + vdrs[nodeID] = vdr + } + + // The weight of this node changed at this block. + var err error + if weightDiff.Decrease { + // The validator's weight was decreased at this block, so in the + // prior block it was higher. + vdr.Weight, err = safemath.Add64(vdr.Weight, weightDiff.Amount) + } else { + // The validator's weight was increased at this block, so in the + // prior block it was lower. + vdr.Weight, err = safemath.Sub(vdr.Weight, weightDiff.Amount) + } + if err != nil { + return err + } + + if vdr.Weight == 0 { + // The validator's weight was 0 before this block so they weren't in the + // validator set. 
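+		// (Worked example: these diffs are applied walking from a newer
+		// state back to an older one, so a Decrease-by-40 recorded at this
+		// height turns a current weight of 60 back into 60 + 40 = 100.
+		// Symmetrically, rewinding an Increase-by-60 on a weight of 60
+		// lands on 0: the node was not in the earlier set, hence the
+		// delete below.)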
+ delete(vdrs, nodeID) + } + return nil +} + func (s *state) ApplyValidatorPublicKeyDiffs( ctx context.Context, validators map[ids.NodeID]*validators.GetValidatorOutput, @@ -1279,74 +1122,528 @@ func (s *state) ApplyValidatorPublicKeyDiffs( continue } - vdr.PublicKey = new(bls.PublicKey).Deserialize(pkBytes) + vdr.PublicKey = new(bls.PublicKey).Deserialize(pkBytes) + } + return diffIter.Error() +} + +// Loads the state from [genesisBls] and [genesis] into [ms]. +func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error { + genesisBlkID := genesisBlk.ID() + s.SetLastAccepted(genesisBlkID) + s.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0)) + s.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply) + s.AddStatelessBlock(genesisBlk) + + // Persist UTXOs that exist at genesis + for _, utxo := range genesis.UTXOs { + avaxUTXO := utxo.UTXO + s.AddUTXO(&avaxUTXO) + } + + // Persist primary network validator set at genesis + for _, vdrTx := range genesis.Validators { + validatorTx, ok := vdrTx.Unsigned.(txs.ValidatorTx) + if !ok { + return fmt.Errorf("expected tx type txs.ValidatorTx but got %T", vdrTx.Unsigned) + } + + stakeAmount := validatorTx.Weight() + stakeDuration := validatorTx.EndTime().Sub(validatorTx.StartTime()) + currentSupply, err := s.GetCurrentSupply(constants.PrimaryNetworkID) + if err != nil { + return err + } + + potentialReward := s.rewards.Calculate( + stakeDuration, + stakeAmount, + currentSupply, + ) + newCurrentSupply, err := safemath.Add64(currentSupply, potentialReward) + if err != nil { + return err + } + + staker, err := NewCurrentStaker(vdrTx.ID(), validatorTx, potentialReward) + if err != nil { + return err + } + + s.PutCurrentValidator(staker) + s.AddTx(vdrTx, status.Committed) + s.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply) + } + + for _, chain := range genesis.Chains { + unsignedChain, ok := chain.Unsigned.(*txs.CreateChainTx) + if !ok { + return fmt.Errorf("expected tx type *txs.CreateChainTx but got %T", chain.Unsigned) + } + + // Ensure all chains that the genesis bytes say to create have the right + // network ID + if unsignedChain.NetworkID != s.ctx.NetworkID { + return avax.ErrWrongNetworkID + } + + s.AddChain(chain) + s.AddTx(chain, status.Committed) + } + + // updateValidators is set to false here to maintain the invariant that the + // primary network's validator set is empty before the validator sets are + // initialized. + return s.write(false /*=updateValidators*/, 0) +} + +// Load pulls data previously stored on disk that is expected to be in memory. +func (s *state) load(hasSynced bool) error { + return utils.Err( + s.loadMerkleMetadata(), + s.loadCurrentStakers(), + s.loadPendingStakers(), + s.initValidatorSets(), + + s.logMerkleRoot(!hasSynced), // we already logged if sync has happened + ) +} + +// Loads the chain time and last accepted block ID from disk +// and populates them in [ms]. 
+func (s *state) loadMerkleMetadata() error {
+	// load chain time
+	chainTimeBytes, err := s.merkleDB.Get(merkleChainTimeKey)
+	if err != nil {
+		return err
+	}
+	var chainTime time.Time
+	if err := chainTime.UnmarshalBinary(chainTimeBytes); err != nil {
+		return err
+	}
+	s.latestComittedChainTime = chainTime
+	s.SetTimestamp(chainTime)
+
+	// load last accepted block
+	blkIDBytes, err := s.merkleDB.Get(merkleLastAcceptedBlkIDKey)
+	if err != nil {
+		return err
+	}
+	lastAcceptedBlkID := ids.Empty
+	copy(lastAcceptedBlkID[:], blkIDBytes)
+	s.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID
+	s.SetLastAccepted(lastAcceptedBlkID)
+
+	// We don't need to load supplies. Unlike chain time and last block ID,
+	// which have the persisted* attribute, we signify that a supply hasn't
+	// been modified by making it nil.
+	return nil
+}
+
+// Loads current stakers from disk and populates them in [ms].
+func (s *state) loadCurrentStakers() error {
+	// TODO ABENEGIA: Check missing metadata
+	s.currentStakers = newBaseStakers()
+
+	prefix := make([]byte, len(currentStakersSectionPrefix))
+	copy(prefix, currentStakersSectionPrefix)
+
+	iter := s.merkleDB.NewIteratorWithPrefix(prefix)
+	defer iter.Release()
+	for iter.Next() {
+		data := &stakersData{}
+		if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil {
+			return fmt.Errorf("failed to deserialize current stakers data: %w", err)
+		}
+
+		tx, err := txs.Parse(txs.GenesisCodec, data.TxBytes)
+		if err != nil {
+			return fmt.Errorf("failed to parse current stakerTx: %w", err)
+		}
+		stakerTx, ok := tx.Unsigned.(txs.Staker)
+		if !ok {
+			return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned)
+		}
+
+		staker, err := NewCurrentStaker(tx.ID(), stakerTx, data.PotentialReward)
+		if err != nil {
+			return err
+		}
+		if staker.Priority.IsValidator() {
+			// TODO: why not PutValidator/PutDelegator??
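+			// (Likely because the Put helpers also record diffs and side
+			// effects meant for newly added stakers; e.g. PutCurrentValidator
+			// seeds zeroed uptime and delegatee-reward entries, which would
+			// clobber the values being loaded. Inserting directly leaves the
+			// persisted state untouched.)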
+			validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
+			validator.validator = staker
+			s.currentStakers.stakers.ReplaceOrInsert(staker)
+		} else {
+			validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
+			if validator.delegators == nil {
+				validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less)
+			}
+			validator.delegators.ReplaceOrInsert(staker)
+			s.currentStakers.stakers.ReplaceOrInsert(staker)
+		}
+	}
+	return iter.Error()
+}
+
+func (s *state) loadPendingStakers() error {
+	// TODO ABENEGIA: Check missing metadata
+	s.pendingStakers = newBaseStakers()
+
+	prefix := make([]byte, len(pendingStakersSectionPrefix))
+	copy(prefix, pendingStakersSectionPrefix)
+
+	iter := s.merkleDB.NewIteratorWithPrefix(prefix)
+	defer iter.Release()
+	for iter.Next() {
+		data := &stakersData{}
+		if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil {
+			return fmt.Errorf("failed to deserialize pending stakers data: %w", err)
+		}
+
+		tx, err := txs.Parse(txs.GenesisCodec, data.TxBytes)
+		if err != nil {
+			return fmt.Errorf("failed to parse pending stakerTx: %w", err)
+		}
+		stakerTx, ok := tx.Unsigned.(txs.Staker)
+		if !ok {
+			return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned)
+		}
+
+		staker, err := NewPendingStaker(tx.ID(), stakerTx)
+		if err != nil {
+			return err
+		}
+		if staker.Priority.IsValidator() {
+			validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
+			validator.validator = staker
+			s.pendingStakers.stakers.ReplaceOrInsert(staker)
+		} else {
+			validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID)
+			if validator.delegators == nil {
+				validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less)
+			}
+			validator.delegators.ReplaceOrInsert(staker)
+			s.pendingStakers.stakers.ReplaceOrInsert(staker)
+		}
+	}
+	return iter.Error()
+}
+
+// Invariant: initValidatorSets requires loadCurrentStakers to have already
+// been called.
+func (s *state) initValidatorSets() error {
+	for subnetID, validators := range s.currentStakers.validators {
+		if s.validators.Count(subnetID) != 0 {
+			// Enforce the invariant that the validator set is empty here.
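+			// (Weights for every current staker are added below, so a
+			// pre-populated set would be double counted.)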
+ return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) + } + + for nodeID, validator := range validators { + validatorStaker := validator.validator + if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + return err + } + + delegatorIterator := NewTreeIterator(validator.delegators) + for delegatorIterator.Next() { + delegatorStaker := delegatorIterator.Value() + if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { + delegatorIterator.Release() + return err + } + } + delegatorIterator.Release() + } + } + + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + return fmt.Errorf("failed to get total weight of primary network validators: %w", err) + } + s.metrics.SetTotalStake(totalWeight) + return nil +} + +func (s *state) write(updateValidators bool, height uint64) error { + currentData, weightDiffs, blsKeyDiffs, valSetDiff, err := s.processCurrentStakers() + if err != nil { + return err + } + pendingData, err := s.processPendingStakers() + if err != nil { + return err + } + + return utils.Err( + s.writeMerkleState(currentData, pendingData), + s.writeBlocks(), + s.writeTxs(), + s.writeLocalUptimes(), + s.writeWeightDiffs(height, weightDiffs), + s.writeBlsKeyDiffs(height, blsKeyDiffs), + s.writeRewardUTXOs(), + s.updateValidatorSet(updateValidators, valSetDiff, weightDiffs), + ) +} + +func (s *state) Close() error { + return utils.Err( + s.flatValidatorWeightDiffsDB.Close(), + s.flatValidatorPublicKeyDiffsDB.Close(), + s.localUptimesDB.Close(), + s.indexedUTXOsDB.Close(), + s.txDB.Close(), + s.blockDB.Close(), + s.blockIDDB.Close(), + s.merkleDB.Close(), + s.baseMerkleDB.Close(), + ) +} + +// If [ms] isn't initialized, initializes it with [genesis]. +// Then loads [ms] from disk. +func (s *state) sync(genesis []byte) error { + shouldInit, err := s.shouldInit() + if err != nil { + return fmt.Errorf( + "failed to check if the database is initialized: %w", + err, + ) + } + + // If the database is empty, create the platform chain anew using the + // provided genesis state + if shouldInit { + if err := s.init(genesis); err != nil { + return fmt.Errorf( + "failed to initialize the database: %w", + err, + ) + } + } + + return s.load(shouldInit) +} + +// Creates a genesis from [genesisBytes] and initializes [ms] with it. 
+func (s *state) init(genesisBytes []byte) error { + // Create the genesis block and save it as being accepted (We don't do + // genesisBlock.Accept() because then it'd look for genesisBlock's + // non-existent parent) + genesisID := hashing.ComputeHash256Array(genesisBytes) + genesisBlock, err := block.NewApricotCommitBlock(genesisID, 0 /*height*/) + if err != nil { + return err + } + + genesisState, err := genesis.Parse(genesisBytes) + if err != nil { + return err + } + if err := s.syncGenesis(genesisBlock, genesisState); err != nil { + return err + } + + if err := s.doneInit(); err != nil { + return err + } + + return s.Commit() +} + +func (s *state) AddStatelessBlock(block block.Block) { + s.addedBlocks[block.ID()] = block +} + +func (s *state) SetHeight(height uint64) { + s.lastAcceptedHeight = height +} + +func (s *state) Commit() error { + defer s.Abort() + batch, err := s.CommitBatch() + if err != nil { + return err + } + return batch.Write() +} + +func (s *state) Abort() { + s.baseDB.Abort() +} + +func (*state) Checksum() ids.ID { + return ids.Empty +} + +func (s *state) CommitBatch() (database.Batch, error) { + // updateValidators is set to true here so that the validator manager is + // kept up to date with the last accepted state. + if err := s.write(true /*updateValidators*/, s.lastAcceptedHeight); err != nil { + return nil, err + } + return s.baseDB.CommitBatch() +} + +func (s *state) writeBlocks() error { + for blkID, blk := range s.addedBlocks { + var ( + blkID = blkID + blkHeight = blk.Height() + ) + + delete(s.addedBlockIDs, blkHeight) + s.blockIDCache.Put(blkHeight, blkID) + if err := database.PutID(s.blockIDDB, database.PackUInt64(blkHeight), blkID); err != nil { + return fmt.Errorf("failed to write block height index: %w", err) + } + + delete(s.addedBlocks, blkID) + // Note: Evict is used rather than Put here because blk may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. 
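+		// For intuition, with illustrative numbers (not from this patch):
+		//
+		//	big := make([]byte, 1<<20)
+		//	blkBytes := big[:16] // len == 16, yet the 1 MiB array stays reachable
+		//
+		// A sized cache charging len(blkBytes) would retain ~1 MiB while
+		// accounting for only 16 bytes, so the entry is evicted rather than
+		// re-inserted.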
+ s.blockCache.Evict(blkID) + + if err := s.blockDB.Put(blkID[:], blk.Bytes()); err != nil { + return fmt.Errorf("failed to write block %s: %w", blkID, err) + } + } + return nil +} + +func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) { + if blk, exists := s.addedBlocks[blockID]; exists { + return blk, nil + } + + if blk, cached := s.blockCache.Get(blockID); cached { + if blk == nil { + return nil, database.ErrNotFound + } + + return blk, nil + } + + blkBytes, err := s.blockDB.Get(blockID[:]) + switch err { + case nil: + // Note: stored blocks are verified, so it's safe to unmarshal them with GenesisCodec + blk, err := block.Parse(block.GenesisCodec, blkBytes) + if err != nil { + return nil, err + } + + s.blockCache.Put(blockID, blk) + return blk, nil + + case database.ErrNotFound: + s.blockCache.Put(blockID, nil) + return nil, database.ErrNotFound + + default: + return nil, err + } +} + +func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { + if blkID, exists := s.addedBlockIDs[height]; exists { + return blkID, nil + } + if blkID, cached := s.blockIDCache.Get(height); cached { + if blkID == ids.Empty { + return ids.Empty, database.ErrNotFound + } + + return blkID, nil } - return diffIter.Error() -} -// DB Operations -func (s *state) Abort() { - s.baseDB.Abort() -} + heightKey := database.PackUInt64(height) -func (s *state) Commit() error { - defer s.Abort() - batch, err := s.CommitBatch() + blkID, err := database.GetID(s.blockIDDB, heightKey) + if err == database.ErrNotFound { + s.blockIDCache.Put(height, ids.Empty) + return ids.Empty, database.ErrNotFound + } if err != nil { - return err + return ids.Empty, err } - return batch.Write() -} -func (s *state) CommitBatch() (database.Batch, error) { - // updateValidators is set to true here so that the validator manager is - // kept up to date with the last accepted state. 
- if err := s.write(true /*updateValidators*/, s.lastAcceptedHeight); err != nil { - return nil, err - } - return s.baseDB.CommitBatch() + s.blockIDCache.Put(height, blkID) + return blkID, nil } -func (*state) Checksum() ids.ID { - return ids.Empty -} +func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ids.ID]*stakersData) error { + for stakerTxID, data := range currentData { + key := merkleCurrentStakersKey(stakerTxID) -func (s *state) Close() error { - return utils.Err( - s.flatValidatorWeightDiffsDB.Close(), - s.flatValidatorPublicKeyDiffsDB.Close(), - s.localUptimesDB.Close(), - s.indexedUTXOsDB.Close(), - s.txDB.Close(), - s.blockDB.Close(), - s.blockIDDB.Close(), - s.merkleDB.Close(), - s.baseMerkleDB.Close(), - ) + if data.TxBytes == nil { + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Delete: true, + }) + continue + } + + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) + if err != nil { + return fmt.Errorf("failed to serialize current stakers data, stakerTxID %v: %w", stakerTxID, err) + } + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: dataBytes, + }) + } + return nil } -func (s *state) write(updateValidators bool, height uint64) error { - currentData, weightDiffs, blsKeyDiffs, valSetDiff, err := s.processCurrentStakers() +func (s *state) GetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID) (uint64, error) { + nodeDelegateeRewards, exists := s.delegateeRewardCache[vdrID] + if exists { + delegateeReward, exists := nodeDelegateeRewards[subnetID] + if exists { + return delegateeReward, nil + } + } + + // try loading from the db + key := merkleDelegateeRewardsKey(vdrID, subnetID) + amountBytes, err := s.merkleDB.Get(key) if err != nil { - return err + return 0, err } - pendingData, err := s.processPendingStakers() + delegateeReward, err := database.ParseUInt64(amountBytes) if err != nil { - return err + return 0, err } - return utils.Err( - s.writeMerkleState(currentData, pendingData), - s.writeBlocks(), - s.writeTxs(), - s.writeLocalUptimes(), - s.writeWeightDiffs(height, weightDiffs), - s.writeBlsKeyDiffs(height, blsKeyDiffs), - s.writeRewardUTXOs(), - s.updateValidatorSet(updateValidators, valSetDiff, weightDiffs), - ) + if _, found := s.delegateeRewardCache[vdrID]; !found { + s.delegateeRewardCache[vdrID] = make(map[ids.ID]uint64) + } + s.delegateeRewardCache[vdrID][subnetID] = delegateeReward + return delegateeReward, nil +} + +func (s *state) SetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID, amount uint64) error { + nodeDelegateeRewards, exists := s.delegateeRewardCache[vdrID] + if !exists { + nodeDelegateeRewards = make(map[ids.ID]uint64) + s.delegateeRewardCache[vdrID] = nodeDelegateeRewards + } + nodeDelegateeRewards[subnetID] = amount + + // track diff + updatedDelegateeRewards, ok := s.modifiedDelegateeReward[vdrID] + if !ok { + updatedDelegateeRewards = set.Set[ids.ID]{} + s.modifiedDelegateeReward[vdrID] = updatedDelegateeRewards + } + updatedDelegateeRewards.Add(subnetID) + return nil } +// DB Operations func (s *state) processCurrentStakers() ( map[ids.ID]*stakersData, map[weightDiffKey]*ValidatorWeightDiff, @@ -1537,156 +1834,92 @@ func (s *state) writeMerkleState(currentData, pendingData map[ids.ID]*stakersDat return s.logMerkleRoot(len(batchOps) != 0) } -func (s *state) writeMetadata(batchOps *[]database.BatchOp) error { - if !s.chainTime.Equal(s.latestComittedChainTime) { - encodedChainTime, err := s.chainTime.MarshalBinary() - if err != nil { - return fmt.Errorf("failed to 
encoding chainTime: %w", err) - } - - *batchOps = append(*batchOps, database.BatchOp{ - Key: merkleChainTimeKey, - Value: encodedChainTime, - }) - s.latestComittedChainTime = s.chainTime - } - - if s.lastAcceptedBlkID != s.latestCommittedLastAcceptedBlkID { - *batchOps = append(*batchOps, database.BatchOp{ - Key: merkleLastAcceptedBlkIDKey, - Value: s.lastAcceptedBlkID[:], - }) - s.latestCommittedLastAcceptedBlkID = s.lastAcceptedBlkID - } - - // lastAcceptedBlockHeight not persisted yet in merkleDB state. - // TODO: Consider if it should be - - for subnetID, supply := range s.modifiedSupplies { - supply := supply - delete(s.modifiedSupplies, subnetID) // clear up s.supplies to avoid potential double commits - s.suppliesCache.Put(subnetID, &supply) - - key := merkleSuppliesKey(subnetID) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: database.PackUInt64(supply), - }) - } - return nil -} - -func (s *state) writePermissionedSubnets(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for _, subnetTx := range s.addedPermissionedSubnets { - key := merklePermissionedSubnetKey(subnetTx.ID()) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: subnetTx.Bytes(), - }) - } - s.addedPermissionedSubnets = make([]*txs.Tx, 0) - return nil -} +func (*state) writePendingStakers(batchOps *[]database.BatchOp, pendingData map[ids.ID]*stakersData) error { + for stakerTxID, data := range pendingData { + key := merklePendingStakersKey(stakerTxID) -func (s *state) writeSubnetOwners(batchOps *[]database.BatchOp) error { - for subnetID, owner := range s.subnetOwners { - owner := owner + if data.TxBytes == nil { + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Delete: true, + }) + continue + } - ownerBytes, err := block.GenesisCodec.Marshal(block.Version, &owner) + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) if err != nil { - return fmt.Errorf("failed to marshal subnet owner: %w", err) + return fmt.Errorf("failed to serialize pending stakers data, stakerTxID %v: %w", stakerTxID, err) } - - s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ - owner: owner, - size: len(ownerBytes), - }) - - key := merkleSubnetOwnersKey(subnetID) *batchOps = append(*batchOps, database.BatchOp{ Key: key, - Value: ownerBytes, + Value: dataBytes, }) } - maps.Clear(s.subnetOwners) return nil } -func (s *state) writeElasticSubnets(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for subnetID, transforkSubnetTx := range s.addedElasticSubnets { - key := merkleElasticSubnetKey(subnetID) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: transforkSubnetTx.Bytes(), - }) - delete(s.addedElasticSubnets, subnetID) - - // Note: Evict is used rather than Put here because tx may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. 
- s.elasticSubnetCache.Evict(subnetID) - } - return nil -} +func (s *state) writeDelegateeRewards(batchOps *[]database.BatchOp) error { //nolint:golint,unparam + for nodeID, nodeDelegateeRewards := range s.modifiedDelegateeReward { + nodeDelegateeRewardsList := nodeDelegateeRewards.List() + for _, subnetID := range nodeDelegateeRewardsList { + delegateeReward := s.delegateeRewardCache[nodeID][subnetID] -func (s *state) writeChains(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for subnetID, chains := range s.addedChains { - for _, chainTx := range chains { - key := merkleChainKey(subnetID, chainTx.ID()) + key := merkleDelegateeRewardsKey(nodeID, subnetID) *batchOps = append(*batchOps, database.BatchOp{ Key: key, - Value: chainTx.Bytes(), + Value: database.PackUInt64(delegateeReward), }) } - delete(s.addedChains, subnetID) + delete(s.modifiedDelegateeReward, nodeID) } return nil } -func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ids.ID]*stakersData) error { - for stakerTxID, data := range currentData { - key := merkleCurrentStakersKey(stakerTxID) +func (s *state) writeTxs() error { + for txID, txStatus := range s.addedTxs { + txID := txID - if data.TxBytes == nil { - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Delete: true, - }) - continue + stx := txBytesAndStatus{ + Tx: txStatus.tx.Bytes(), + Status: txStatus.status, } - dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) + // Note that we're serializing a [txBytesAndStatus] here, not a + // *txs.Tx, so we don't use [txs.Codec]. + txBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stx) if err != nil { - return fmt.Errorf("failed to serialize current stakers data, stakerTxID %v: %w", stakerTxID, err) + return fmt.Errorf("failed to serialize tx: %w", err) + } + + delete(s.addedTxs, txID) + // Note: Evict is used rather than Put here because stx may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. 
+ s.txCache.Evict(txID) + if err := s.txDB.Put(txID[:], txBytes); err != nil { + return fmt.Errorf("failed to add tx: %w", err) } - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: dataBytes, - }) } return nil } -func (*state) writePendingStakers(batchOps *[]database.BatchOp, pendingData map[ids.ID]*stakersData) error { - for stakerTxID, data := range pendingData { - key := merklePendingStakersKey(stakerTxID) - - if data.TxBytes == nil { - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Delete: true, - }) - continue - } +func (s *state) writeRewardUTXOs() error { + for txID, utxos := range s.addedRewardUTXOs { + delete(s.addedRewardUTXOs, txID) + s.rewardUTXOsCache.Put(txID, utxos) + rawTxDB := prefixdb.New(txID[:], s.rewardUTXOsDB) + txDB := linkeddb.NewDefault(rawTxDB) - dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) - if err != nil { - return fmt.Errorf("failed to serialize pending stakers data, stakerTxID %v: %w", stakerTxID, err) + for _, utxo := range utxos { + utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + if err != nil { + return fmt.Errorf("failed to serialize reward UTXO: %w", err) + } + utxoID := utxo.InputID() + if err := txDB.Put(utxoID[:], utxoBytes); err != nil { + return fmt.Errorf("failed to add reward UTXO: %w", err) + } } - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: dataBytes, - }) } return nil } @@ -1737,74 +1970,56 @@ func (s *state) writeUTXOs(batchOps *[]database.BatchOp) error { return nil } -func (s *state) writeDelegateeRewards(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for nodeID, nodeDelegateeRewards := range s.modifiedDelegateeReward { - nodeDelegateeRewardsList := nodeDelegateeRewards.List() - for _, subnetID := range nodeDelegateeRewardsList { - delegateeReward := s.delegateeRewardCache[nodeID][subnetID] - - key := merkleDelegateeRewardsKey(nodeID, subnetID) - *batchOps = append(*batchOps, database.BatchOp{ - Key: key, - Value: database.PackUInt64(delegateeReward), - }) - } - delete(s.modifiedDelegateeReward, nodeID) +func (s *state) writePermissionedSubnets(batchOps *[]database.BatchOp) error { //nolint:golint,unparam + for _, subnetTx := range s.addedPermissionedSubnets { + key := merklePermissionedSubnetKey(subnetTx.ID()) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: subnetTx.Bytes(), + }) } + s.addedPermissionedSubnets = make([]*txs.Tx, 0) return nil } -func (s *state) writeBlocks() error { - for blkID, blk := range s.addedBlocks { - var ( - blkID = blkID - blkHeight = blk.Height() - ) - - delete(s.addedBlockIDs, blkHeight) - s.blockIDCache.Put(blkHeight, blkID) - if err := database.PutID(s.blockIDDB, database.PackUInt64(blkHeight), blkID); err != nil { - return fmt.Errorf("failed to write block height index: %w", err) - } +func (s *state) writeElasticSubnets(batchOps *[]database.BatchOp) error { //nolint:golint,unparam + for subnetID, transforkSubnetTx := range s.addedElasticSubnets { + key := merkleElasticSubnetKey(subnetID) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: transforkSubnetTx.Bytes(), + }) + delete(s.addedElasticSubnets, subnetID) - delete(s.addedBlocks, blkID) - // Note: Evict is used rather than Put here because blk may end up + // Note: Evict is used rather than Put here because tx may end up // referencing additional data (because of shared byte slices) that // would not be properly accounted for in the cache sizing. 
- s.blockCache.Evict(blkID) - - if err := s.blockDB.Put(blkID[:], blk.Bytes()); err != nil { - return fmt.Errorf("failed to write block %s: %w", blkID, err) - } + s.elasticSubnetCache.Evict(subnetID) } return nil } -func (s *state) writeTxs() error { - for txID, txStatus := range s.addedTxs { - txID := txID - - stx := txBytesAndStatus{ - Tx: txStatus.tx.Bytes(), - Status: txStatus.status, - } +func (s *state) writeSubnetOwners(batchOps *[]database.BatchOp) error { + for subnetID, owner := range s.subnetOwners { + owner := owner - // Note that we're serializing a [txBytesAndStatus] here, not a - // *txs.Tx, so we don't use [txs.Codec]. - txBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stx) + ownerBytes, err := block.GenesisCodec.Marshal(block.Version, &owner) if err != nil { - return fmt.Errorf("failed to serialize tx: %w", err) + return fmt.Errorf("failed to marshal subnet owner: %w", err) } - delete(s.addedTxs, txID) - // Note: Evict is used rather than Put here because stx may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. - s.txCache.Evict(txID) - if err := s.txDB.Put(txID[:], txBytes); err != nil { - return fmt.Errorf("failed to add tx: %w", err) - } + s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ + owner: owner, + size: len(ownerBytes), + }) + + key := merkleSubnetOwnersKey(subnetID) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: ownerBytes, + }) } + maps.Clear(s.subnetOwners) return nil } @@ -1851,6 +2066,59 @@ func (s *state) writeLocalUptimes() error { return nil } +func (s *state) writeChains(batchOps *[]database.BatchOp) error { //nolint:golint,unparam + for subnetID, chains := range s.addedChains { + for _, chainTx := range chains { + key := merkleChainKey(subnetID, chainTx.ID()) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: chainTx.Bytes(), + }) + } + delete(s.addedChains, subnetID) + } + return nil +} + +func (s *state) writeMetadata(batchOps *[]database.BatchOp) error { + if !s.chainTime.Equal(s.latestComittedChainTime) { + encodedChainTime, err := s.chainTime.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to encoding chainTime: %w", err) + } + + *batchOps = append(*batchOps, database.BatchOp{ + Key: merkleChainTimeKey, + Value: encodedChainTime, + }) + s.latestComittedChainTime = s.chainTime + } + + if s.lastAcceptedBlkID != s.latestCommittedLastAcceptedBlkID { + *batchOps = append(*batchOps, database.BatchOp{ + Key: merkleLastAcceptedBlkIDKey, + Value: s.lastAcceptedBlkID[:], + }) + s.latestCommittedLastAcceptedBlkID = s.lastAcceptedBlkID + } + + // lastAcceptedBlockHeight not persisted yet in merkleDB state. + // TODO: Consider if it should be + + for subnetID, supply := range s.modifiedSupplies { + supply := supply + delete(s.modifiedSupplies, subnetID) // clear up s.supplies to avoid potential double commits + s.suppliesCache.Put(subnetID, &supply) + + key := merkleSuppliesKey(subnetID) + *batchOps = append(*batchOps, database.BatchOp{ + Key: key, + Value: database.PackUInt64(supply), + }) + } + return nil +} + func (s *state) writeWeightDiffs(height uint64, weightDiffs map[weightDiffKey]*ValidatorWeightDiff) error { for weightKey, weightDiff := range weightDiffs { if weightDiff.Amount == 0 { @@ -1884,27 +2152,6 @@ func (s *state) writeBlsKeyDiffs(height uint64, blsKeyDiffs map[ids.NodeID]*bls. 
return nil } -func (s *state) writeRewardUTXOs() error { - for txID, utxos := range s.addedRewardUTXOs { - delete(s.addedRewardUTXOs, txID) - s.rewardUTXOsCache.Put(txID, utxos) - rawTxDB := prefixdb.New(txID[:], s.rewardUTXOsDB) - txDB := linkeddb.NewDefault(rawTxDB) - - for _, utxo := range utxos { - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) - if err != nil { - return fmt.Errorf("failed to serialize reward UTXO: %w", err) - } - utxoID := utxo.InputID() - if err := txDB.Put(utxoID[:], utxoBytes); err != nil { - return fmt.Errorf("failed to add reward UTXO: %w", err) - } - } - } - return nil -} - func (s *state) updateValidatorSet( updateValidators bool, valSetDiff map[weightDiffKey]*diffValidator, @@ -1990,3 +2237,57 @@ func (s *state) logMerkleRoot(hasChanges bool) error { ) return nil } + +func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) { + nodeUptimes, exists := s.localUptimesCache[vdrID] + if exists { + uptime, exists := nodeUptimes[subnetID] + if exists { + return uptime.Duration, uptime.lastUpdated, nil + } + } + + // try loading from DB + key := merkleLocalUptimesKey(vdrID, subnetID) + uptimeBytes, err := s.localUptimesDB.Get(key) + switch err { + case nil: + upTm := &uptimes{} + if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, upTm); err != nil { + return 0, time.Time{}, err + } + upTm.lastUpdated = time.Unix(int64(upTm.LastUpdated), 0) + s.localUptimesCache[vdrID] = make(map[ids.ID]*uptimes) + s.localUptimesCache[vdrID][subnetID] = upTm + return upTm.Duration, upTm.lastUpdated, nil + + case database.ErrNotFound: + // no local data for this staker uptime + return 0, time.Time{}, database.ErrNotFound + default: + return 0, time.Time{}, err + } +} + +func (s *state) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { + nodeUptimes, exists := s.localUptimesCache[vdrID] + if !exists { + nodeUptimes = make(map[ids.ID]*uptimes) + s.localUptimesCache[vdrID] = nodeUptimes + } + + nodeUptimes[subnetID] = &uptimes{ + Duration: upDuration, + LastUpdated: uint64(lastUpdated.Unix()), + lastUpdated: lastUpdated, + } + + // track diff + updatedNodeUptimes, ok := s.modifiedLocalUptimes[vdrID] + if !ok { + updatedNodeUptimes = set.Set[ids.ID]{} + s.modifiedLocalUptimes[vdrID] = updatedNodeUptimes + } + updatedNodeUptimes.Add(subnetID) + return nil +} diff --git a/vms/platformvm/state/state_load_ops.go b/vms/platformvm/state/state_load_ops.go deleted file mode 100644 index 1db86ffc01b2..000000000000 --- a/vms/platformvm/state/state_load_ops.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "fmt" - "time" - - "github.com/google/btree" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/block" - "github.com/ava-labs/avalanchego/vms/platformvm/genesis" - "github.com/ava-labs/avalanchego/vms/platformvm/status" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - - safemath "github.com/ava-labs/avalanchego/utils/math" -) - -// var errNotYetImplemented = errors.New("NOT YET IMPLEMENTED") - -// If [ms] isn't initialized, initializes it with [genesis]. -// Then loads [ms] from disk. 
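
// The uptime accessors above persist lastUpdated as whole unix seconds and
// rebuild the time.Time with time.Unix(seconds, 0), so sub-second precision
// is intentionally dropped on every round trip. A small stdlib-only sketch
// of that round trip (uptimeRecord is a stand-in for the serialized half of
// the uptimes struct):

package main

import (
	"fmt"
	"time"
)

type uptimeRecord struct {
	Duration    time.Duration
	LastUpdated uint64 // unix seconds: the only persisted form
}

func main() {
	now := time.Now()
	rec := uptimeRecord{
		Duration:    3 * time.Hour,
		LastUpdated: uint64(now.Unix()),
	}

	// Restore exactly as GetUptime does: time.Unix(seconds, 0).
	restored := time.Unix(int64(rec.LastUpdated), 0)

	fmt.Println(restored.Equal(now.Truncate(time.Second))) // true
	fmt.Println(rec.Duration)                              // 3h0m0s
}
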
-func (s *state) sync(genesis []byte) error { - shouldInit, err := s.shouldInit() - if err != nil { - return fmt.Errorf( - "failed to check if the database is initialized: %w", - err, - ) - } - - // If the database is empty, create the platform chain anew using the - // provided genesis state - if shouldInit { - if err := s.init(genesis); err != nil { - return fmt.Errorf( - "failed to initialize the database: %w", - err, - ) - } - } - - return s.load(shouldInit) -} - -func (s *state) shouldInit() (bool, error) { - has, err := s.singletonDB.Has(initializedKey) - return !has, err -} - -func (s *state) doneInit() error { - return s.singletonDB.Put(initializedKey, nil) -} - -// Creates a genesis from [genesisBytes] and initializes [ms] with it. -func (s *state) init(genesisBytes []byte) error { - // Create the genesis block and save it as being accepted (We don't do - // genesisBlock.Accept() because then it'd look for genesisBlock's - // non-existent parent) - genesisID := hashing.ComputeHash256Array(genesisBytes) - genesisBlock, err := block.NewApricotCommitBlock(genesisID, 0 /*height*/) - if err != nil { - return err - } - - genesisState, err := genesis.Parse(genesisBytes) - if err != nil { - return err - } - if err := s.syncGenesis(genesisBlock, genesisState); err != nil { - return err - } - - if err := s.doneInit(); err != nil { - return err - } - - return s.Commit() -} - -// Loads the state from [genesisBls] and [genesis] into [ms]. -func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error { - genesisBlkID := genesisBlk.ID() - s.SetLastAccepted(genesisBlkID) - s.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0)) - s.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply) - s.AddStatelessBlock(genesisBlk) - - // Persist UTXOs that exist at genesis - for _, utxo := range genesis.UTXOs { - avaxUTXO := utxo.UTXO - s.AddUTXO(&avaxUTXO) - } - - // Persist primary network validator set at genesis - for _, vdrTx := range genesis.Validators { - validatorTx, ok := vdrTx.Unsigned.(txs.ValidatorTx) - if !ok { - return fmt.Errorf("expected tx type txs.ValidatorTx but got %T", vdrTx.Unsigned) - } - - stakeAmount := validatorTx.Weight() - stakeDuration := validatorTx.EndTime().Sub(validatorTx.StartTime()) - currentSupply, err := s.GetCurrentSupply(constants.PrimaryNetworkID) - if err != nil { - return err - } - - potentialReward := s.rewards.Calculate( - stakeDuration, - stakeAmount, - currentSupply, - ) - newCurrentSupply, err := safemath.Add64(currentSupply, potentialReward) - if err != nil { - return err - } - - staker, err := NewCurrentStaker(vdrTx.ID(), validatorTx, potentialReward) - if err != nil { - return err - } - - s.PutCurrentValidator(staker) - s.AddTx(vdrTx, status.Committed) - s.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply) - } - - for _, chain := range genesis.Chains { - unsignedChain, ok := chain.Unsigned.(*txs.CreateChainTx) - if !ok { - return fmt.Errorf("expected tx type *txs.CreateChainTx but got %T", chain.Unsigned) - } - - // Ensure all chains that the genesis bytes say to create have the right - // network ID - if unsignedChain.NetworkID != s.ctx.NetworkID { - return avax.ErrWrongNetworkID - } - - s.AddChain(chain) - s.AddTx(chain, status.Committed) - } - - // updateValidators is set to false here to maintain the invariant that the - // primary network's validator set is empty before the validator sets are - // initialized. 
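
// The sync/shouldInit/doneInit flow above boils down to an idempotent
// init-or-load gate keyed on a singleton flag. A toy sketch under that
// reading (keystore stands in for singletonDB, and the callback for
// init + Commit):

package main

import "fmt"

type keystore map[string][]byte

const initializedFlag = "initialized"

func syncOnce(db keystore, initGenesis func() error) error {
	if _, ok := db[initializedFlag]; ok {
		return nil // already initialized; just load
	}
	if err := initGenesis(); err != nil {
		return fmt.Errorf("failed to initialize the database: %w", err)
	}
	db[initializedFlag] = nil // doneInit: the key's presence is the flag
	return nil
}

func main() {
	db := keystore{}
	ran := 0
	for i := 0; i < 2; i++ {
		_ = syncOnce(db, func() error { ran++; return nil })
	}
	fmt.Println(ran) // 1: genesis work runs only on the first sync
}
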
- return s.write(false /*=updateValidators*/, 0) -} - -// Load pulls data previously stored on disk that is expected to be in memory. -func (s *state) load(hasSynced bool) error { - return utils.Err( - s.loadMerkleMetadata(), - s.loadCurrentStakers(), - s.loadPendingStakers(), - s.initValidatorSets(), - - s.logMerkleRoot(!hasSynced), // we already logged if sync has happened - ) -} - -// Loads the chain time and last accepted block ID from disk -// and populates them in [ms]. -func (s *state) loadMerkleMetadata() error { - // load chain time - chainTimeBytes, err := s.merkleDB.Get(merkleChainTimeKey) - if err != nil { - return err - } - var chainTime time.Time - if err := chainTime.UnmarshalBinary(chainTimeBytes); err != nil { - return err - } - s.latestComittedChainTime = chainTime - s.SetTimestamp(chainTime) - - // load last accepted block - blkIDBytes, err := s.merkleDB.Get(merkleLastAcceptedBlkIDKey) - if err != nil { - return err - } - lastAcceptedBlkID := ids.Empty - copy(lastAcceptedBlkID[:], blkIDBytes) - s.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID - s.SetLastAccepted(lastAcceptedBlkID) - - // We don't need to load supplies. Unlike chain time and last block ID, - // which have the persisted* attribute, we signify that a supply hasn't - // been modified by making it nil. - return nil -} - -// Loads current stakes from disk and populates them in [ms]. -func (s *state) loadCurrentStakers() error { - // TODO ABENEGIA: Check missing metadata - s.currentStakers = newBaseStakers() - - prefix := make([]byte, len(currentStakersSectionPrefix)) - copy(prefix, currentStakersSectionPrefix) - - iter := s.merkleDB.NewIteratorWithPrefix(prefix) - defer iter.Release() - for iter.Next() { - data := &stakersData{} - if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil { - return fmt.Errorf("failed to deserialize current stakers data: %w", err) - } - - tx, err := txs.Parse(txs.GenesisCodec, data.TxBytes) - if err != nil { - return fmt.Errorf("failed to parsing current stakerTx: %w", err) - } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - - staker, err := NewCurrentStaker(tx.ID(), stakerTx, data.PotentialReward) - if err != nil { - return err - } - if staker.Priority.IsValidator() { - // TODO: why not PutValidator/PutDelegator?? 
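
// loadCurrentStakers above recovers every staker by iterating all keys
// under one section prefix, with no secondary index. A stdlib-only sketch
// of that access pattern (prefixScan is a toy stand-in for merkleDB's
// NewIteratorWithPrefix, which additionally yields keys in sorted order):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func prefixScan(store map[string]string, prefix string) []string {
	keys := make([]string, 0, len(store))
	for k := range store {
		if strings.HasPrefix(k, prefix) {
			keys = append(keys, k)
		}
	}
	sort.Strings(keys) // emulate the iterator's sorted key order
	return keys
}

func main() {
	store := map[string]string{
		"current/stakerA": "...",
		"current/stakerB": "...",
		"pending/stakerC": "...",
	}
	for _, k := range prefixScan(store, "current/") {
		fmt.Println(k)
	}
	// current/stakerA
	// current/stakerB
}
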
- validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - validator.validator = staker - s.currentStakers.stakers.ReplaceOrInsert(staker) - } else { - validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - if validator.delegators == nil { - validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) - } - validator.delegators.ReplaceOrInsert(staker) - s.currentStakers.stakers.ReplaceOrInsert(staker) - } - } - return iter.Error() -} - -func (s *state) loadPendingStakers() error { - // TODO ABENEGIA: Check missing metadata - s.pendingStakers = newBaseStakers() - - prefix := make([]byte, len(pendingStakersSectionPrefix)) - copy(prefix, pendingStakersSectionPrefix) - - iter := s.merkleDB.NewIteratorWithPrefix(prefix) - defer iter.Release() - for iter.Next() { - data := &stakersData{} - if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil { - return fmt.Errorf("failed to deserialize pending stakers data: %w", err) - } - - tx, err := txs.Parse(txs.GenesisCodec, data.TxBytes) - if err != nil { - return fmt.Errorf("failed to parsing pending stakerTx: %w", err) - } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - - staker, err := NewPendingStaker(tx.ID(), stakerTx) - if err != nil { - return err - } - if staker.Priority.IsValidator() { - validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - validator.validator = staker - s.pendingStakers.stakers.ReplaceOrInsert(staker) - } else { - validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) - if validator.delegators == nil { - validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) - } - validator.delegators.ReplaceOrInsert(staker) - s.pendingStakers.stakers.ReplaceOrInsert(staker) - } - } - return iter.Error() -} - -// Invariant: initValidatorSets requires loadCurrentValidators to have already -// been called. -func (s *state) initValidatorSets() error { - for subnetID, validators := range s.currentStakers.validators { - if s.validators.Count(subnetID) != 0 { - // Enforce the invariant that the validator set is empty here. 
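
// Both loaders keep per-validator delegators in a lazily created B-tree so
// delegator iteration comes out ordered. A minimal sketch of that structure,
// assuming github.com/google/btree v1.1+ for the generic NewG API (degree 2
// is an arbitrary stand-in for defaultTreeDegree):

package main

import (
	"fmt"

	"github.com/google/btree"
)

type staker struct {
	txID   string
	weight uint64
}

func less(a, b *staker) bool { return a.txID < b.txID }

func main() {
	// Mirrors validator.delegators: created on first insert, ordered by Less.
	delegators := btree.NewG[*staker](2, less)
	delegators.ReplaceOrInsert(&staker{txID: "b", weight: 10})
	delegators.ReplaceOrInsert(&staker{txID: "a", weight: 5})

	// In-order walk: the moral equivalent of NewTreeIterator.
	delegators.Ascend(func(s *staker) bool {
		fmt.Println(s.txID, s.weight)
		return true
	})
	// a 5
	// b 10
}
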
- return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) - } - - for nodeID, validator := range validators { - validatorStaker := validator.validator - if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { - return err - } - - delegatorIterator := NewTreeIterator(validator.delegators) - for delegatorIterator.Next() { - delegatorStaker := delegatorIterator.Value() - if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { - delegatorIterator.Release() - return err - } - } - delegatorIterator.Release() - } - } - - s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) - totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) - if err != nil { - return fmt.Errorf("failed to get total weight of primary network validators: %w", err) - } - s.metrics.SetTotalStake(totalWeight) - return nil -} diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 88e5897ba4d4..99c90d55f202 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -722,14 +722,6 @@ func TestStateSubnetOwner(t *testing.T) { // Returns the block, status of the block, and whether it is a [stateBlk]. // Invariant: blkBytes is safe to parse with blocks.GenesisCodec -// -// TODO: Remove after v1.11.x is activated -type stateBlk struct { - Blk block.Block - Bytes []byte `serialize:"true"` - Status choices.Status `serialize:"true"` -} - func parseStoredBlock(blkBytes []byte) (block.Block, bool, error) { // Attempt to parse as blocks.Block blk, err := block.Parse(block.GenesisCodec, blkBytes) diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 9d4af6f5a025..9ec4880e4a44 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -479,7 +479,7 @@ func verifyAddPermissionlessValidatorTx( validatorRules, err := getValidatorRules(backend, chainState, tx.Subnet) if err != nil { - return fmt.Errorf("failed retrieving validator rules: %w", err) + return err } duration := tx.Validator.Duration() diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 81582f1b14d2..80b38a06c234 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -1436,6 +1436,7 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) + vm.ctx.Lock.Unlock() }() subnetID := testSubnet1.TxID From 07b3cdb0c227eaf4245a1ead6fc7d2fa93a39d7a Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 29 Nov 2023 16:56:19 -0500 Subject: [PATCH 110/132] nit naming --- vms/platformvm/state/state.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index c1cc51bc5413..50fb9b693422 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1907,8 +1907,8 @@ func (s *state) writeRewardUTXOs() error { for txID, utxos := range s.addedRewardUTXOs { delete(s.addedRewardUTXOs, txID) s.rewardUTXOsCache.Put(txID, utxos) - rawTxDB := prefixdb.New(txID[:], s.rewardUTXOsDB) - txDB := linkeddb.NewDefault(rawTxDB) + rawRewardUTXOsDB := prefixdb.New(txID[:], s.rewardUTXOsDB) + rewardUTXOsDB := linkeddb.NewDefault(rawRewardUTXOsDB) for _, utxo 
:= range utxos { utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) @@ -1916,7 +1916,7 @@ func (s *state) writeRewardUTXOs() error { return fmt.Errorf("failed to serialize reward UTXO: %w", err) } utxoID := utxo.InputID() - if err := txDB.Put(utxoID[:], utxoBytes); err != nil { + if err := rewardUTXOsDB.Put(utxoID[:], utxoBytes); err != nil { return fmt.Errorf("failed to add reward UTXO: %w", err) } } From 3ea16e16ce07c0b8ca7193392fb38887dd2e347e Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 1 Dec 2023 12:32:46 -0500 Subject: [PATCH 111/132] P-Chain merkledb -- remove caches (#2392) --- vms/platformvm/block/builder/helpers_test.go | 3 - vms/platformvm/block/executor/helpers_test.go | 3 - vms/platformvm/state/state.go | 486 ++++-------------- vms/platformvm/state/state_test.go | 6 - vms/platformvm/txs/executor/helpers_test.go | 5 - .../validators/manager_benchmark_test.go | 5 - vms/platformvm/vm.go | 2 - vms/platformvm/vm_regression_test.go | 8 - 8 files changed, 91 insertions(+), 427 deletions(-) diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index de37d08ff0dd..21f1b84e28ed 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -243,14 +243,11 @@ func defaultState( ) state.State { require := require.New(t) - execCfg, _ := config.GetExecutionConfig([]byte(`{}`)) genesisBytes := buildGenesisTest(t, ctx) state, err := state.New( db, genesisBytes, - prometheus.NewRegistry(), validators, - execCfg, ctx, metrics.Noop, rewards, diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index 778d9b203181..91f4c31c1a93 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -269,13 +269,10 @@ func defaultState( rewards reward.Calculator, ) state.State { genesisBytes := buildGenesisTest(ctx) - execCfg, _ := config.GetExecutionConfig([]byte(`{}`)) state, err := state.New( db, genesisBytes, - prometheus.NewRegistry(), validators, - execCfg, ctx, metrics.Noop, rewards, diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 50fb9b693422..abb4c395597a 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -18,8 +18,6 @@ import ( "golang.org/x/exp/maps" "golang.org/x/exp/slices" - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" @@ -34,12 +32,9 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" - "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" @@ -56,7 +51,6 @@ const ( valueNodeCacheSize = 512 * units.MiB intermediateNodeCacheSize = 512 * units.MiB - utxoCacheSize = 8192 // from avax/utxo_state.go ) var ( @@ -244,63 +238,54 @@ type state struct { currentStakers *baseStakers pendingStakers *baseStakers - 
delegateeRewardCache map[ids.NodeID]map[ids.ID]uint64 // (nodeID, subnetID) --> delegatee amount - modifiedDelegateeReward map[ids.NodeID]set.Set[ids.ID] // tracks (nodeID, subnetID) pairs updated after last commit + // Node ID --> Subnet ID --> Delegatee Reward on that subnet for that node ID. + modifiedDelegateeReward map[ids.NodeID]map[ids.ID]uint64 // UTXOs section - modifiedUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO - utxoCache cache.Cacher[ids.ID, *avax.UTXO] // UTXO ID -> *UTXO. If the *UTXO is nil the UTXO doesn't exist + modifiedUTXOs map[ids.ID]*avax.UTXO // map of UTXO ID -> *UTXO // Metadata section chainTime, latestComittedChainTime time.Time lastAcceptedBlkID, latestCommittedLastAcceptedBlkID ids.ID - lastAcceptedHeight uint64 // TODO: Should this be written to state?? - modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply - suppliesCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database + lastAcceptedHeight uint64 // TODO: Should this be written to state?? + modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply // Subnets section // Subnet ID --> Owner of the subnet - subnetOwners map[ids.ID]fx.Owner - subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database + subnetOwners map[ids.ID]fx.Owner - addedPermissionedSubnets []*txs.Tx // added SubnetTxs, waiting to be committed - permissionedSubnetCache []*txs.Tx // nil if the subnets haven't been loaded - addedElasticSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx - elasticSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database + addedPermissionedSubnets []*txs.Tx // added SubnetTxs, waiting to be committed + permissionedSubnetCache []*txs.Tx // nil if the subnets haven't been loaded + addedElasticSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx // Chains section - addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet - chainCache cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx + addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet // Blocks section // Note: addedBlocks is a list because multiple blocks can be committed at one (proposal + accepted option) - addedBlocks map[ids.ID]block.Block // map of blockID -> Block. - blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database + addedBlocks map[ids.ID]block.Block // map of blockID -> Block. blockDB database.Database - addedBlockIDs map[uint64]ids.ID // map of height -> blockID - blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database + addedBlockIDs map[uint64]ids.ID // map of height -> blockID blockIDDB database.Database // Txs section // FIND a way to reduce use of these. No use in verification of addedTxs // a limited windows to support APIs - addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} - txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. 
If the entry is nil, it isn't in the database + addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} txDB database.Database indexedUTXOsDB database.Database - localUptimesCache map[ids.NodeID]map[ids.ID]*uptimes // vdrID -> subnetID -> metadata - modifiedLocalUptimes map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs + // Node ID --> SubnetID --> Uptime of the node on the subnet + modifiedLocalUptimes map[ids.NodeID]map[ids.ID]*uptimes localUptimesDB database.Database flatValidatorWeightDiffsDB database.Database flatValidatorPublicKeyDiffsDB database.Database // Reward UTXOs section - addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO - rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO + addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO rewardUTXOsDB database.Database } @@ -335,38 +320,10 @@ type txAndStatus struct { status status.Status } -type fxOwnerAndSize struct { - owner fx.Owner - size int -} - -func txSize(_ ids.ID, tx *txs.Tx) int { - if tx == nil { - return ids.IDLen + constants.PointerOverhead - } - return ids.IDLen + len(tx.Bytes()) + constants.PointerOverhead -} - -func txAndStatusSize(_ ids.ID, t *txAndStatus) int { - if t == nil { - return ids.IDLen + constants.PointerOverhead - } - return ids.IDLen + len(t.tx.Bytes()) + wrappers.IntLen + 2*constants.PointerOverhead -} - -func blockSize(_ ids.ID, blk block.Block) int { - if blk == nil { - return ids.IDLen + constants.PointerOverhead - } - return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead -} - func New( db database.Database, genesisBytes []byte, - metricsReg prometheus.Registerer, validators validators.Manager, - execCfg *config.ExecutionConfig, ctx *snow.Context, metrics metrics.Metrics, rewards reward.Calculator, @@ -375,9 +332,7 @@ func New( db, metrics, validators, - execCfg, ctx, - metricsReg, rewards, ) if err != nil { @@ -397,9 +352,7 @@ func newState( db database.Database, metrics metrics.Metrics, validators validators.Manager, - execCfg *config.ExecutionConfig, ctx *snow.Context, - metricsReg prometheus.Registerer, rewards reward.Calculator, ) (*state, error) { var ( @@ -433,80 +386,6 @@ func newState( return nil, fmt.Errorf("failed creating merkleDB: %w", err) } - txCache, err := metercacher.New( - "tx_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize), - ) - if err != nil { - return nil, err - } - - rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( - "reward_utxos_cache", - metricsReg, - &cache.LRU[ids.ID, []*avax.UTXO]{Size: execCfg.RewardUTXOsCacheSize}, - ) - if err != nil { - return nil, err - } - - subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize]( - "subnet_owner_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, fxOwnerAndSize](execCfg.FxOwnerCacheSize, func(_ ids.ID, f fxOwnerAndSize) int { - return ids.IDLen + f.size - }), - ) - if err != nil { - return nil, err - } - - transformedSubnetCache, err := metercacher.New( - "transformed_subnet_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, *txs.Tx](execCfg.TransformedSubnetTxCacheSize, txSize), - ) - if err != nil { - return nil, err - } - - supplyCache, err := metercacher.New[ids.ID, *uint64]( - "supply_cache", - metricsReg, - &cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize}, - ) - if err != nil { - return nil, err - } - - chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( - "chain_cache", - metricsReg, - &cache.LRU[ids.ID, []*txs.Tx]{Size: execCfg.ChainCacheSize}, - ) - if err != 
nil { - return nil, err - } - - blockCache, err := metercacher.New[ids.ID, block.Block]( - "block_cache", - metricsReg, - cache.NewSizedLRU[ids.ID, block.Block](execCfg.BlockCacheSize, blockSize), - ) - if err != nil { - return nil, err - } - - blockIDCache, err := metercacher.New[uint64, ids.ID]( - "block_id_cache", - metricsReg, - &cache.LRU[uint64, ids.ID]{Size: execCfg.BlockIDCacheSize}, - ) - if err != nil { - return nil, err - } - return &state{ validators: validators, ctx: ctx, @@ -521,49 +400,38 @@ func newState( currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), - delegateeRewardCache: make(map[ids.NodeID]map[ids.ID]uint64), - modifiedDelegateeReward: make(map[ids.NodeID]set.Set[ids.ID]), + modifiedDelegateeReward: make(map[ids.NodeID]map[ids.ID]uint64), modifiedUTXOs: make(map[ids.ID]*avax.UTXO), - utxoCache: &cache.LRU[ids.ID, *avax.UTXO]{Size: utxoCacheSize}, modifiedSupplies: make(map[ids.ID]uint64), - suppliesCache: supplyCache, - subnetOwners: make(map[ids.ID]fx.Owner), - subnetOwnerCache: subnetOwnerCache, + subnetOwners: make(map[ids.ID]fx.Owner), addedPermissionedSubnets: make([]*txs.Tx, 0), permissionedSubnetCache: nil, // created first time GetSubnets is called addedElasticSubnets: make(map[ids.ID]*txs.Tx), - elasticSubnetCache: transformedSubnetCache, addedChains: make(map[ids.ID][]*txs.Tx), - chainCache: chainCache, addedBlocks: make(map[ids.ID]block.Block), - blockCache: blockCache, blockDB: blockDB, addedBlockIDs: make(map[uint64]ids.ID), - blockIDCache: blockIDCache, blockIDDB: blockIDsDB, addedTxs: make(map[ids.ID]*txAndStatus), - txCache: txCache, txDB: txDB, indexedUTXOsDB: indexedUTXOsDB, - localUptimesCache: make(map[ids.NodeID]map[ids.ID]*uptimes), - modifiedLocalUptimes: make(map[ids.NodeID]set.Set[ids.ID]), + modifiedLocalUptimes: make(map[ids.NodeID]map[ids.ID]*uptimes), localUptimesDB: localUptimesDB, flatValidatorWeightDiffsDB: flatValidatorWeightDiffsDB, flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), - rewardUTXOsCache: rewardUTXOsCache, rewardUTXOsDB: rewardUTXOsDB, }, nil } @@ -674,6 +542,9 @@ func (s *state) GetSubnets() ([]*txs.Tx, error) { func (s *state) AddSubnet(createSubnetTx *txs.Tx) { s.addedPermissionedSubnets = append(s.addedPermissionedSubnets, createSubnetTx) + if s.permissionedSubnetCache != nil { + s.permissionedSubnetCache = append(s.permissionedSubnetCache, createSubnetTx) + } } func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { @@ -681,13 +552,6 @@ func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { return owner, nil } - if ownerAndSize, cached := s.subnetOwnerCache.Get(subnetID); cached { - if ownerAndSize.owner == nil { - return nil, database.ErrNotFound - } - return ownerAndSize.owner, nil - } - subnetIDKey := merkleSubnetOwnersKey(subnetID) ownerBytes, err := s.merkleDB.Get(subnetIDKey) if err == nil { @@ -695,10 +559,6 @@ func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { if _, err := block.GenesisCodec.Unmarshal(ownerBytes, &owner); err != nil { return nil, err } - s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ - owner: owner, - size: len(ownerBytes), - }) return owner, nil } if err != database.ErrNotFound { @@ -707,9 +567,6 @@ func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { subnetIntf, _, err := s.GetTx(subnetID) if err != nil { - if err == database.ErrNotFound { - s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{}) - } return nil, err } @@ -731,31 +588,12 @@ func (s *state) 
GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { return tx, nil } - if tx, cached := s.elasticSubnetCache.Get(subnetID); cached { - if tx == nil { - return nil, database.ErrNotFound - } - return tx, nil - } - key := merkleElasticSubnetKey(subnetID) transformSubnetTxBytes, err := s.merkleDB.Get(key) - switch err { - case nil: - transformSubnetTx, err := txs.Parse(txs.GenesisCodec, transformSubnetTxBytes) - if err != nil { - return nil, err - } - s.elasticSubnetCache.Put(subnetID, transformSubnetTx) - return transformSubnetTx, nil - - case database.ErrNotFound: - s.elasticSubnetCache.Put(subnetID, nil) - return nil, database.ErrNotFound - - default: + if err != nil { return nil, err } + return txs.Parse(txs.GenesisCodec, transformSubnetTxBytes) } func (s *state) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { @@ -764,10 +602,6 @@ func (s *state) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { } func (s *state) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { - if chains, cached := s.chainCache.Get(subnetID); cached { - return chains, nil - } - prefix := merkleChainPrefix(subnetID) chainDBIt := s.merkleDB.NewIteratorWithPrefix(prefix) defer chainDBIt.Release() @@ -785,8 +619,8 @@ func (s *state) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { if err := chainDBIt.Error(); err != nil { return nil, err } + chains = append(chains, s.addedChains[subnetID]...) - s.chainCache.Put(subnetID, chains) return chains, nil } @@ -801,41 +635,28 @@ func (s *state) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { if tx, exists := s.addedTxs[txID]; exists { return tx.tx, tx.status, nil } - if tx, cached := s.txCache.Get(txID); cached { - if tx == nil { - return nil, status.Unknown, database.ErrNotFound - } - return tx.tx, tx.status, nil - } txBytes, err := s.txDB.Get(txID[:]) - switch err { - case nil: - stx := txBytesAndStatus{} - if _, err := txs.GenesisCodec.Unmarshal(txBytes, &stx); err != nil { - return nil, status.Unknown, err - } - - tx, err := txs.Parse(txs.GenesisCodec, stx.Tx) - if err != nil { - return nil, status.Unknown, err - } - - ptx := &txAndStatus{ - tx: tx, - status: stx.Status, - } - - s.txCache.Put(txID, ptx) - return ptx.tx, ptx.status, nil + if err != nil { + return nil, status.Unknown, err + } - case database.ErrNotFound: - s.txCache.Put(txID, nil) - return nil, status.Unknown, database.ErrNotFound + var txBytesAndStatus txBytesAndStatus + if _, err := txs.GenesisCodec.Unmarshal(txBytes, &txBytesAndStatus); err != nil { + return nil, status.Unknown, err + } - default: + tx, err := txs.Parse(txs.GenesisCodec, txBytesAndStatus.Tx) + if err != nil { return nil, status.Unknown, err } + + txAndStatus := &txAndStatus{ + tx: tx, + status: txBytesAndStatus.Status, + } + + return txAndStatus.tx, txAndStatus.status, nil } func (s *state) AddTx(tx *txs.Tx, status status.Status) { @@ -849,28 +670,24 @@ func (s *state) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { if utxos, exists := s.addedRewardUTXOs[txID]; exists { return utxos, nil } - if utxos, exists := s.rewardUTXOsCache.Get(txID); exists { - return utxos, nil - } rawTxDB := prefixdb.New(txID[:], s.rewardUTXOsDB) txDB := linkeddb.NewDefault(rawTxDB) it := txDB.NewIterator() defer it.Release() - utxos := []*avax.UTXO(nil) + var utxos []*avax.UTXO for it.Next() { - utxo := &avax.UTXO{} - if _, err := txs.Codec.Unmarshal(it.Value(), utxo); err != nil { + var utxo avax.UTXO + if _, err := txs.Codec.Unmarshal(it.Value(), &utxo); err != nil { return nil, err } - utxos = append(utxos, utxo) + utxos = 
append(utxos, &utxo) } if err := it.Error(); err != nil { return nil, err } - s.rewardUTXOsCache.Put(txID, utxos) return utxos, nil } @@ -885,31 +702,18 @@ func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { } return utxo, nil } - if utxo, found := s.utxoCache.Get(utxoID); found { - if utxo == nil { - return nil, database.ErrNotFound - } - return utxo, nil - } key := merkleUtxoIDKey(utxoID) + bytes, err := s.merkleDB.Get(key) + if err != nil { + return nil, err + } - switch bytes, err := s.merkleDB.Get(key); err { - case nil: - utxo := &avax.UTXO{} - if _, err := txs.GenesisCodec.Unmarshal(bytes, utxo); err != nil { - return nil, err - } - s.utxoCache.Put(utxoID, utxo) - return utxo, nil - - case database.ErrNotFound: - s.utxoCache.Put(utxoID, nil) - return nil, database.ErrNotFound - - default: + var utxo avax.UTXO + if _, err := txs.GenesisCodec.Unmarshal(bytes, &utxo); err != nil { return nil, err } + return &utxo, nil } func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { @@ -934,6 +738,8 @@ func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) start = ids.Empty utxoIDs = append(utxoIDs, utxoID) } + + // TODO do we need to account for UTXOs in [s.modifiedUTXOs]? return utxoIDs, iter.Error() } @@ -970,36 +776,17 @@ func (s *state) SetLastAccepted(lastAccepted ids.ID) { } func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { - supply, ok := s.modifiedSupplies[subnetID] - if ok { + if supply, ok := s.modifiedSupplies[subnetID]; ok { return supply, nil } - cachedSupply, ok := s.suppliesCache.Get(subnetID) - if ok { - if cachedSupply == nil { - return 0, database.ErrNotFound - } - return *cachedSupply, nil - } key := merkleSuppliesKey(subnetID) - - switch supplyBytes, err := s.merkleDB.Get(key); err { - case nil: - supply, err := database.ParseUInt64(supplyBytes) - if err != nil { - return 0, fmt.Errorf("failed parsing supply: %w", err) - } - s.suppliesCache.Put(subnetID, &supply) - return supply, nil - - case database.ErrNotFound: - s.suppliesCache.Put(subnetID, nil) - return 0, database.ErrNotFound - - default: + supplyBytes, err := s.merkleDB.Get(key) + if err != nil { return 0, err } + + return database.ParseUInt64(supplyBytes) } func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { @@ -1296,8 +1083,8 @@ func (s *state) loadPendingStakers() error { iter := s.merkleDB.NewIteratorWithPrefix(prefix) defer iter.Release() for iter.Next() { - data := &stakersData{} - if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil { + var data stakersData + if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), &data); err != nil { return fmt.Errorf("failed to deserialize pending stakers data: %w", err) } @@ -1495,16 +1282,11 @@ func (s *state) writeBlocks() error { ) delete(s.addedBlockIDs, blkHeight) - s.blockIDCache.Put(blkHeight, blkID) if err := database.PutID(s.blockIDDB, database.PackUInt64(blkHeight), blkID); err != nil { return fmt.Errorf("failed to write block height index: %w", err) } delete(s.addedBlocks, blkID) - // Note: Evict is used rather than Put here because blk may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. 
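
// The GetRewardUTXOs loop above declares `var utxo avax.UTXO` inside the
// loop body so each appended &utxo points at a fresh allocation. A small
// stdlib sketch of why that placement matters (encoding/json stands in for
// txs.Codec here):

package main

import (
	"encoding/json"
	"fmt"
)

type utxo struct {
	ID string `json:"id"`
}

func main() {
	raw := [][]byte{[]byte(`{"id":"u1"}`), []byte(`{"id":"u2"}`)}

	var utxos []*utxo
	for _, b := range raw {
		// Declared per iteration: every &u below is a distinct pointer.
		// Hoisting `var u utxo` above the loop would make all appended
		// pointers alias one variable, and every element would end as u2.
		var u utxo
		if err := json.Unmarshal(b, &u); err != nil {
			panic(err)
		}
		utxos = append(utxos, &u)
	}
	fmt.Println(utxos[0].ID, utxos[1].ID) // u1 u2
}
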
- s.blockCache.Evict(blkID) if err := s.blockDB.Put(blkID[:], blk.Bytes()); err != nil { return fmt.Errorf("failed to write block %s: %w", blkID, err) @@ -1518,60 +1300,22 @@ func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) { return blk, nil } - if blk, cached := s.blockCache.Get(blockID); cached { - if blk == nil { - return nil, database.ErrNotFound - } - - return blk, nil - } - blkBytes, err := s.blockDB.Get(blockID[:]) - switch err { - case nil: - // Note: stored blocks are verified, so it's safe to unmarshal them with GenesisCodec - blk, err := block.Parse(block.GenesisCodec, blkBytes) - if err != nil { - return nil, err - } - - s.blockCache.Put(blockID, blk) - return blk, nil - - case database.ErrNotFound: - s.blockCache.Put(blockID, nil) - return nil, database.ErrNotFound - - default: + if err != nil { return nil, err } + + // Note: stored blocks are verified, so it's safe to unmarshal them with GenesisCodec + return block.Parse(block.GenesisCodec, blkBytes) } func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { if blkID, exists := s.addedBlockIDs[height]; exists { return blkID, nil } - if blkID, cached := s.blockIDCache.Get(height); cached { - if blkID == ids.Empty { - return ids.Empty, database.ErrNotFound - } - - return blkID, nil - } - - heightKey := database.PackUInt64(height) - - blkID, err := database.GetID(s.blockIDDB, heightKey) - if err == database.ErrNotFound { - s.blockIDCache.Put(height, ids.Empty) - return ids.Empty, database.ErrNotFound - } - if err != nil { - return ids.Empty, err - } - s.blockIDCache.Put(height, blkID) - return blkID, nil + key := database.PackUInt64(height) + return database.GetID(s.blockIDDB, key) } func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ids.ID]*stakersData) error { @@ -1599,47 +1343,29 @@ func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ } func (s *state) GetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID) (uint64, error) { - nodeDelegateeRewards, exists := s.delegateeRewardCache[vdrID] - if exists { - delegateeReward, exists := nodeDelegateeRewards[subnetID] - if exists { - return delegateeReward, nil + // check if we have a modified value + if subnetIDToReward, ok := s.modifiedDelegateeReward[vdrID]; ok { + if reward, ok := subnetIDToReward[subnetID]; ok { + return reward, nil } } // try loading from the db key := merkleDelegateeRewardsKey(vdrID, subnetID) - amountBytes, err := s.merkleDB.Get(key) - if err != nil { - return 0, err - } - delegateeReward, err := database.ParseUInt64(amountBytes) + rewardBytes, err := s.merkleDB.Get(key) if err != nil { return 0, err } - - if _, found := s.delegateeRewardCache[vdrID]; !found { - s.delegateeRewardCache[vdrID] = make(map[ids.ID]uint64) - } - s.delegateeRewardCache[vdrID][subnetID] = delegateeReward - return delegateeReward, nil + return database.ParseUInt64(rewardBytes) } func (s *state) SetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID, amount uint64) error { - nodeDelegateeRewards, exists := s.delegateeRewardCache[vdrID] - if !exists { - nodeDelegateeRewards = make(map[ids.ID]uint64) - s.delegateeRewardCache[vdrID] = nodeDelegateeRewards - } - nodeDelegateeRewards[subnetID] = amount - - // track diff - updatedDelegateeRewards, ok := s.modifiedDelegateeReward[vdrID] + subnetIDToReward, ok := s.modifiedDelegateeReward[vdrID] if !ok { - updatedDelegateeRewards = set.Set[ids.ID]{} - s.modifiedDelegateeReward[vdrID] = updatedDelegateeRewards + subnetIDToReward = make(map[ids.ID]uint64) + 
s.modifiedDelegateeReward[vdrID] = subnetIDToReward } - updatedDelegateeRewards.Add(subnetID) + subnetIDToReward[subnetID] = amount return nil } @@ -1859,15 +1585,12 @@ func (*state) writePendingStakers(batchOps *[]database.BatchOp, pendingData map[ } func (s *state) writeDelegateeRewards(batchOps *[]database.BatchOp) error { //nolint:golint,unparam - for nodeID, nodeDelegateeRewards := range s.modifiedDelegateeReward { - nodeDelegateeRewardsList := nodeDelegateeRewards.List() - for _, subnetID := range nodeDelegateeRewardsList { - delegateeReward := s.delegateeRewardCache[nodeID][subnetID] - + for nodeID, subnetIDToReward := range s.modifiedDelegateeReward { + for subnetID, reward := range subnetIDToReward { key := merkleDelegateeRewardsKey(nodeID, subnetID) *batchOps = append(*batchOps, database.BatchOp{ Key: key, - Value: database.PackUInt64(delegateeReward), + Value: database.PackUInt64(reward), }) } delete(s.modifiedDelegateeReward, nodeID) @@ -1895,7 +1618,6 @@ func (s *state) writeTxs() error { // Note: Evict is used rather than Put here because stx may end up // referencing additional data (because of shared byte slices) that // would not be properly accounted for in the cache sizing. - s.txCache.Evict(txID) if err := s.txDB.Put(txID[:], txBytes); err != nil { return fmt.Errorf("failed to add tx: %w", err) } @@ -1906,7 +1628,6 @@ func (s *state) writeTxs() error { func (s *state) writeRewardUTXOs() error { for txID, utxos := range s.addedRewardUTXOs { delete(s.addedRewardUTXOs, txID) - s.rewardUTXOsCache.Put(txID, utxos) rawRewardUTXOsDB := prefixdb.New(txID[:], s.rewardUTXOsDB) rewardUTXOsDB := linkeddb.NewDefault(rawRewardUTXOsDB) @@ -1931,7 +1652,6 @@ func (s *state) writeUTXOs(batchOps *[]database.BatchOp) error { if utxo == nil { // delete the UTXO switch utxo, err := s.GetUTXO(utxoID); err { case nil: - s.utxoCache.Put(utxoID, nil) *batchOps = append(*batchOps, database.BatchOp{ Key: key, Delete: true, @@ -1990,11 +1710,6 @@ func (s *state) writeElasticSubnets(batchOps *[]database.BatchOp) error { //noli Value: transforkSubnetTx.Bytes(), }) delete(s.addedElasticSubnets, subnetID) - - // Note: Evict is used rather than Put here because tx may end up - // referencing additional data (because of shared byte slices) that - // would not be properly accounted for in the cache sizing. 
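
// SetDelegateeReward above uses the usual two-level map with get-or-create
// on the inner level. A minimal sketch of that pattern with plain string
// keys standing in for ids.NodeID and ids.ID:

package main

import "fmt"

func setReward(m map[string]map[string]uint64, nodeID, subnetID string, amount uint64) {
	inner, ok := m[nodeID]
	if !ok {
		// Create the inner map on first use so callers never pre-seed it.
		inner = make(map[string]uint64)
		m[nodeID] = inner
	}
	inner[subnetID] = amount
}

func main() {
	rewards := make(map[string]map[string]uint64)
	setReward(rewards, "node-1", "subnet-A", 100)
	setReward(rewards, "node-1", "subnet-B", 250)
	fmt.Println(rewards["node-1"]["subnet-B"]) // 250
}
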
- s.elasticSubnetCache.Evict(subnetID) } return nil } @@ -2008,11 +1723,6 @@ func (s *state) writeSubnetOwners(batchOps *[]database.BatchOp) error { return fmt.Errorf("failed to marshal subnet owner: %w", err) } - s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ - owner: owner, - size: len(ownerBytes), - }) - key := merkleSubnetOwnersKey(subnetID) *batchOps = append(*batchOps, database.BatchOp{ Key: key, @@ -2047,12 +1757,11 @@ func (s *state) writeUTXOsIndex(utxo *avax.UTXO, insertUtxo bool) error { } func (s *state) writeLocalUptimes() error { - for vdrID, updatedSubnets := range s.modifiedLocalUptimes { - for subnetID := range updatedSubnets { + for vdrID, subnetIDToUptime := range s.modifiedLocalUptimes { + for subnetID, uptime := range subnetIDToUptime { key := merkleLocalUptimesKey(vdrID, subnetID) - uptimes := s.localUptimesCache[vdrID][subnetID] - uptimeBytes, err := txs.GenesisCodec.Marshal(txs.Version, uptimes) + uptimeBytes, err := txs.GenesisCodec.Marshal(txs.Version, uptime) if err != nil { return err } @@ -2108,7 +1817,6 @@ func (s *state) writeMetadata(batchOps *[]database.BatchOp) error { for subnetID, supply := range s.modifiedSupplies { supply := supply delete(s.modifiedSupplies, subnetID) // clear up s.supplies to avoid potential double commits - s.suppliesCache.Put(subnetID, &supply) key := merkleSuppliesKey(subnetID) *batchOps = append(*batchOps, database.BatchOp{ @@ -2239,10 +1947,9 @@ func (s *state) logMerkleRoot(hasChanges bool) error { } func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) { - nodeUptimes, exists := s.localUptimesCache[vdrID] - if exists { - uptime, exists := nodeUptimes[subnetID] - if exists { + // check if we have a modified value + if subnetIDToUptime, ok := s.modifiedLocalUptimes[vdrID]; ok { + if uptime, ok := subnetIDToUptime[subnetID]; ok { return uptime.Duration, uptime.lastUpdated, nil } } @@ -2257,8 +1964,6 @@ func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Du return 0, time.Time{}, err } upTm.lastUpdated = time.Unix(int64(upTm.LastUpdated), 0) - s.localUptimesCache[vdrID] = make(map[ids.ID]*uptimes) - s.localUptimesCache[vdrID][subnetID] = upTm return upTm.Duration, upTm.lastUpdated, nil case database.ErrNotFound: @@ -2270,24 +1975,15 @@ func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Du } func (s *state) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { - nodeUptimes, exists := s.localUptimesCache[vdrID] - if !exists { - nodeUptimes = make(map[ids.ID]*uptimes) - s.localUptimesCache[vdrID] = nodeUptimes + updatedNodeUptimes, ok := s.modifiedLocalUptimes[vdrID] + if !ok { + updatedNodeUptimes = make(map[ids.ID]*uptimes, 0) + s.modifiedLocalUptimes[vdrID] = updatedNodeUptimes } - - nodeUptimes[subnetID] = &uptimes{ + updatedNodeUptimes[subnetID] = &uptimes{ Duration: upDuration, LastUpdated: uint64(lastUpdated.Unix()), lastUpdated: lastUpdated, } - - // track diff - updatedNodeUptimes, ok := s.modifiedLocalUptimes[vdrID] - if !ok { - updatedNodeUptimes = set.Set[ids.ID]{} - s.modifiedLocalUptimes[vdrID] = updatedNodeUptimes - } - updatedNodeUptimes.Add(subnetID) return nil } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 99c90d55f202..4ca05ee63e94 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -10,8 +10,6 @@ import ( "testing" "time" - 
"github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -29,7 +27,6 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" - "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" @@ -164,16 +161,13 @@ func newUninitializedState(require *require.Assertions) (State, database.Databas } func newStateFromDB(require *require.Assertions, db database.Database) State { - execCfg, _ := config.GetExecutionConfig(nil) state, err := newState( db, metrics.Noop, validators.NewManager(), - execCfg, &snow.Context{ Log: logging.NoLog{}, }, - prometheus.NewRegistry(), reward.NewCalculator(reward.Config{ MaxConsumptionRate: .12 * reward.PercentDenominator, MinConsumptionRate: .1 * reward.PercentDenominator, diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index df3150e04bdd..b37ee777552a 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -11,8 +11,6 @@ import ( "testing" "time" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains" @@ -224,13 +222,10 @@ func defaultState( rewards reward.Calculator, ) state.State { genesisBytes := buildGenesisTest(ctx) - execCfg, _ := config.GetExecutionConfig(nil) state, err := state.New( db, genesisBytes, - prometheus.NewRegistry(), validators, - execCfg, ctx, metrics.Noop, rewards, diff --git a/vms/platformvm/validators/manager_benchmark_test.go b/vms/platformvm/validators/manager_benchmark_test.go index 155811d988ad..1b549cca00e8 100644 --- a/vms/platformvm/validators/manager_benchmark_test.go +++ b/vms/platformvm/validators/manager_benchmark_test.go @@ -102,18 +102,13 @@ func BenchmarkGetValidatorSet(b *testing.B) { vdrs := validators.NewManager() - execConfig, err := config.GetExecutionConfig(nil) - require.NoError(err) - metrics, err := metrics.New("", prometheus.NewRegistry()) require.NoError(err) s, err := state.New( db, genesisBytes, - prometheus.NewRegistry(), vdrs, - execConfig, &snow.Context{ NetworkID: constants.UnitTestID, NodeID: ids.GenerateTestNodeID(), diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index c0cbaea36f37..78c2c8e140ea 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -135,9 +135,7 @@ func (vm *VM) Initialize( vm.state, err = state.New( vm.db, genesisBytes, - registerer, vm.Config.Validators, - execConfig, vm.ctx, vm.metrics, rewards, diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 80b38a06c234..e2f989bcd5e2 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -10,8 +10,6 @@ import ( "testing" "time" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains" @@ -643,13 +641,10 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Force a reload of the state from the database. 
vm.Config.Validators = validators.NewManager() - execCfg, _ := config.GetExecutionConfig(nil) newState, err := state.New( vm.db, nil, - prometheus.NewRegistry(), vm.Config.Validators, - execCfg, vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), @@ -950,13 +945,10 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Force a reload of the state from the database. vm.Config.Validators = validators.NewManager() - execCfg, _ := config.GetExecutionConfig(nil) newState, err := state.New( vm.db, nil, - prometheus.NewRegistry(), vm.Config.Validators, - execCfg, vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), From 02c526706716ad56cd18e61c732415924dc3cc57 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 1 Dec 2023 12:57:20 -0500 Subject: [PATCH 112/132] P-Chain merkledb -- don't merkleize last accepted block ID (#2397) --- vms/platformvm/state/state.go | 42 ++++++++++++++++------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index abb4c395597a..2d5733f6db00 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -70,13 +70,13 @@ var ( merkleBlsKeyDiffPrefix = []byte{0x08} merkleRewardUtxosPrefix = []byte{0x09} - initializedKey = []byte("initialized") + initializedKey = []byte{0x00} + lastAcceptedBlockIDKey = []byte{0x01} // merkle db sections - metadataSectionPrefix = byte(0x00) - merkleChainTimeKey = []byte{metadataSectionPrefix, 0x00} - merkleLastAcceptedBlkIDKey = []byte{metadataSectionPrefix, 0x01} - merkleSuppliesPrefix = []byte{metadataSectionPrefix, 0x02} + metadataSectionPrefix = byte(0x00) + merkleChainTimeKey = []byte{metadataSectionPrefix, 0x00} + merkleSuppliesPrefix = []byte{metadataSectionPrefix, 0x01} permissionedSubnetSectionPrefix = []byte{0x01} elasticSubnetSectionPrefix = []byte{0x02} @@ -986,6 +986,19 @@ func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) er // Load pulls data previously stored on disk that is expected to be in memory. func (s *state) load(hasSynced bool) error { + // load last accepted block + lastAcceptedBlkIDBytes, err := s.singletonDB.Get(lastAcceptedBlockIDKey) + if err != nil { + return err + } + + lastAcceptedBlkID, err := ids.ToID(lastAcceptedBlkIDBytes) + if err != nil { + return err + } + s.SetLastAccepted(lastAcceptedBlkID) + s.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID + return utils.Err( s.loadMerkleMetadata(), s.loadCurrentStakers(), @@ -1011,16 +1024,6 @@ func (s *state) loadMerkleMetadata() error { s.latestComittedChainTime = chainTime s.SetTimestamp(chainTime) - // load last accepted block - blkIDBytes, err := s.merkleDB.Get(merkleLastAcceptedBlkIDKey) - if err != nil { - return err - } - lastAcceptedBlkID := ids.Empty - copy(lastAcceptedBlkID[:], blkIDBytes) - s.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID - s.SetLastAccepted(lastAcceptedBlkID) - // We don't need to load supplies. Unlike chain time and last block ID, // which have the persisted* attribute, we signify that a supply hasn't // been modified by making it nil. 
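
// load() above reads the last accepted block ID back as raw bytes and
// rebuilds it with ids.ToID, which is a length-checked copy into a fixed
// 32-byte array. A self-contained sketch of that round trip (id32 is a toy
// stand-in for ids.ID):

package main

import (
	"errors"
	"fmt"
)

type id32 [32]byte

func toID(b []byte) (id32, error) {
	var id id32
	if len(b) != len(id) {
		return id, errors.New("expected 32 bytes")
	}
	copy(id[:], b)
	return id, nil
}

func main() {
	var stored id32
	stored[0] = 0xab

	// Persist as the raw id[:] slice, then restore on load.
	raw := append([]byte(nil), stored[:]...)
	restored, err := toID(raw)
	fmt.Println(restored == stored, err) // true <nil>
}
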
@@ -1172,6 +1175,7 @@ func (s *state) write(updateValidators bool, height uint64) error {
 		s.writeBlsKeyDiffs(height, blsKeyDiffs),
 		s.writeRewardUTXOs(),
 		s.updateValidatorSet(updateValidators, valSetDiff, weightDiffs),
+		s.singletonDB.Put(lastAcceptedBlockIDKey, s.lastAcceptedBlkID[:]), // Write last accepted block ID
 	)
 }
 
@@ -1803,14 +1807,6 @@ func (s *state) writeMetadata(batchOps *[]database.BatchOp) error {
 		s.latestComittedChainTime = s.chainTime
 	}
 
-	if s.lastAcceptedBlkID != s.latestCommittedLastAcceptedBlkID {
-		*batchOps = append(*batchOps, database.BatchOp{
-			Key:   merkleLastAcceptedBlkIDKey,
-			Value: s.lastAcceptedBlkID[:],
-		})
-		s.latestCommittedLastAcceptedBlkID = s.lastAcceptedBlkID
-	}
-
 	// lastAcceptedBlockHeight not persisted yet in merkleDB state.
 	// TODO: Consider if it should be
 
From 093647c9a45916da61fcfd9399f4fa7ef24d86f2 Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Fri, 1 Dec 2023 17:12:45 -0500
Subject: [PATCH 113/132] [WIP] Add ability to get merkle root of P-Chain state (#2352)

---
 vms/platformvm/state/diff.go       | 249 +++++++++++++++++++++++++++++
 vms/platformvm/state/mock_state.go |  46 ++++++
 vms/platformvm/state/state.go      |  72 ++++-----
 3 files changed, 330 insertions(+), 37 deletions(-)

diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go
index d509fa69e0dd..c19f782b65dd 100644
--- a/vms/platformvm/state/diff.go
+++ b/vms/platformvm/state/diff.go
@@ -4,6 +4,7 @@
 package state
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"time"
@@ -11,11 +12,15 @@ import (
 	"github.com/ava-labs/avalanchego/database"
 	"github.com/ava-labs/avalanchego/ids"
 	"github.com/ava-labs/avalanchego/vms/components/avax"
+	"github.com/ava-labs/avalanchego/vms/platformvm/block"
 	"github.com/ava-labs/avalanchego/vms/platformvm/fx"
 	"github.com/ava-labs/avalanchego/vms/platformvm/status"
 	"github.com/ava-labs/avalanchego/vms/platformvm/txs"
+	"github.com/ava-labs/avalanchego/x/merkledb"
 )
 
+const initialTxSliceSize = 8
+
 var (
 	_ Diff = (*diff)(nil)
 
@@ -74,6 +79,25 @@ func NewDiff(
 	}, nil
 }
 
+func (d *diff) NewView() (merkledb.TrieView, error) {
+	parentState, ok := d.stateVersions.GetState(d.parentID)
+	if !ok {
+		return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID)
+	}
+
+	changes, err := d.getMerkleChanges()
+	if err != nil {
+		return nil, err
+	}
+
+	parentView, err := parentState.NewView()
+	if err != nil {
+		return nil, err
+	}
+
+	return parentView.NewView(context.Background(), changes)
+}
+
 func (d *diff) GetTimestamp() time.Time {
 	return d.timestamp
 }
@@ -384,6 +408,231 @@ func (d *diff) DeleteUTXO(utxoID ids.ID) {
 	}
 }
 
+func (d *diff) getMerkleChanges() (merkledb.ViewChanges, error) {
+	changes := merkledb.ViewChanges{}
+
+	// writeMetadata
+	encodedChainTime, err := d.timestamp.MarshalBinary()
+	if err != nil {
+		return merkledb.ViewChanges{}, fmt.Errorf("failed to encode chainTime: %w", err)
+	}
+	changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+		Key:   merkleChainTimeKey,
+		Value: encodedChainTime,
+	})
+	for subnetID, supply := range d.currentSupply {
+		key := merkleSuppliesKey(subnetID)
+		changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+			Key:   key,
+			Value: database.PackUInt64(supply),
+		})
+	}
+
+	// writePermissionedSubnets
+	for _, subnet := range d.addedSubnets {
+		key := merklePermissionedSubnetKey(subnet.ID())
+		changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+			Key:   key,
+			Value: subnet.Bytes(),
+		})
+	}
+
+	// writeSubnetOwners
+	for subnetID, owner := range d.subnetOwners {
+		owner := owner
+		ownerBytes, err := 
block.GenesisCodec.Marshal(block.Version, &owner) + if err != nil { + return merkledb.ViewChanges{}, fmt.Errorf("failed to marshal subnet owner: %w", err) + } + + key := merkleSubnetOwnersKey(subnetID) + changes.BatchOps = append(changes.BatchOps, database.BatchOp{ + Key: key, + Value: ownerBytes, + }) + } + + // writeElasticSubnets + for _, tx := range d.transformedSubnets { + transformSubnetTx := tx.Unsigned.(*txs.TransformSubnetTx) + key := merkleElasticSubnetKey(transformSubnetTx.Subnet) + changes.BatchOps = append(changes.BatchOps, database.BatchOp{ + Key: key, + Value: transformSubnetTx.Bytes(), + }) + } + + // writeChains + for _, chains := range d.addedChains { + for _, chain := range chains { + subnetID := chain.Unsigned.(*txs.CreateChainTx).SubnetID + key := merkleChainKey(subnetID, chain.ID()) + changes.BatchOps = append(changes.BatchOps, database.BatchOp{ + Key: key, + Value: chain.Bytes(), + }) + } + } + + type txIDAndReward struct { + txID ids.ID + reward uint64 + } + + // writeCurrentStakers + for _, nodeIDToValidatorDiff := range d.currentStakerDiffs.validatorDiffs { + for _, validatorDiff := range nodeIDToValidatorDiff { + toAddTxIDAndRewards := make([]txIDAndReward, 0, initialTxSliceSize) + + switch validatorDiff.validatorStatus { + case deleted: + changes.BatchOps = append(changes.BatchOps, database.BatchOp{ + Key: merkleCurrentStakersKey(validatorDiff.validator.TxID), + Delete: true, + }) + case added: + toAddTxIDAndRewards = append(toAddTxIDAndRewards, txIDAndReward{ + txID: validatorDiff.validator.TxID, + reward: validatorDiff.validator.PotentialReward, + }) + } + + addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) + for addedDelegatorIterator.Next() { + staker := addedDelegatorIterator.Value() + toAddTxIDAndRewards = append(toAddTxIDAndRewards, txIDAndReward{ + txID: staker.TxID, + reward: staker.PotentialReward, + }) + } + addedDelegatorIterator.Release() + + for _, staker := range validatorDiff.deletedDelegators { + changes.BatchOps = append(changes.BatchOps, database.BatchOp{ + Key: merkleCurrentStakersKey(staker.TxID), + Delete: true, + }) + } + + for _, txIDAndReward := range toAddTxIDAndRewards { + tx, _, err := d.GetTx(txIDAndReward.txID) + if err != nil { + return merkledb.ViewChanges{}, err + } + + stakersDataBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stakersData{ + TxBytes: tx.Bytes(), + PotentialReward: txIDAndReward.reward, + }) + if err != nil { + return merkledb.ViewChanges{}, err + } + + changes.BatchOps = append(changes.BatchOps, database.BatchOp{ + Key: merkleCurrentStakersKey(txIDAndReward.txID), + Value: stakersDataBytes, + }) + } + } + } + + // writePendingStakers + for _, subnetValidatorDiffs := range d.pendingStakerDiffs.validatorDiffs { + for _, validatorDiff := range subnetValidatorDiffs { + toAddTxIDAndRewards := make([]txIDAndReward, 0, initialTxSliceSize) + + // validatorDiff.validator is not guaranteed to be non-nil here. 
+			// Access it only if validatorDiff.validatorStatus is added or deleted
+			switch validatorDiff.validatorStatus {
+			case added:
+				toAddTxIDAndRewards = append(toAddTxIDAndRewards, txIDAndReward{
+					txID:   validatorDiff.validator.TxID,
+					reward: 0,
+				})
+			case deleted:
+				changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+					Key:    merklePendingStakersKey(validatorDiff.validator.TxID),
+					Delete: true,
+				})
+			}
+
+			addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators)
+			for addedDelegatorIterator.Next() {
+				staker := addedDelegatorIterator.Value()
+				toAddTxIDAndRewards = append(toAddTxIDAndRewards, txIDAndReward{
+					txID:   staker.TxID,
+					reward: 0,
+				})
+			}
+			addedDelegatorIterator.Release()
+
+			for _, staker := range validatorDiff.deletedDelegators {
+				changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+					Key:    merklePendingStakersKey(staker.TxID),
+					Delete: true,
+				})
+			}
+
+			for _, txIDAndReward := range toAddTxIDAndRewards {
+				tx, _, err := d.GetTx(txIDAndReward.txID)
+				if err != nil {
+					return merkledb.ViewChanges{}, err
+				}
+
+				stakersDataBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stakersData{
+					TxBytes:         tx.Bytes(),
+					PotentialReward: txIDAndReward.reward,
+				})
+				if err != nil {
+					return merkledb.ViewChanges{}, err
+				}
+
+				changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+					Key:   merklePendingStakersKey(txIDAndReward.txID),
+					Value: stakersDataBytes,
+				})
+			}
+		}
+	}
+
+	// writeDelegateeRewards
+	for subnetID, nodes := range d.modifiedDelegateeRewards {
+		for nodeID, amount := range nodes {
+			key := merkleDelegateeRewardsKey(nodeID, subnetID)
+			changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+				Key:   key,
+				Value: database.PackUInt64(amount),
+			})
+		}
+	}
+
+	// writeUTXOs
+	for utxoID, utxo := range d.modifiedUTXOs {
+		key := merkleUtxoIDKey(utxoID)
+
+		if utxo == nil {
+			// Deleting a UTXO
+			changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+				Key:    key,
+				Delete: true,
+			})
+			continue
+		}
+
+		// Inserting a UTXO
+		utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo)
+		if err != nil {
+			return merkledb.ViewChanges{}, err
+		}
+		changes.BatchOps = append(changes.BatchOps, database.BatchOp{
+			Key:   key,
+			Value: utxoBytes,
+		})
+	}
+
+	return changes, nil
+}
+
 func (d *diff) Apply(baseState Chain) error {
 	baseState.SetTimestamp(d.timestamp)
 	for subnetID, supply := range d.currentSupply {
diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go
index 231eb92ccf29..69b817fe372d 100644
--- a/vms/platformvm/state/mock_state.go
+++ b/vms/platformvm/state/mock_state.go
@@ -20,6 +20,7 @@ import (
 	fx "github.com/ava-labs/avalanchego/vms/platformvm/fx"
 	status "github.com/ava-labs/avalanchego/vms/platformvm/status"
 	txs "github.com/ava-labs/avalanchego/vms/platformvm/txs"
+	merkledb "github.com/ava-labs/avalanchego/x/merkledb"
 	gomock "go.uber.org/mock/gomock"
 )
 
@@ -373,6 +374,21 @@ func (mr *MockChainMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), arg0)
 }
 
+// NewView mocks base method.
+func (m *MockChain) NewView() (merkledb.TrieView, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NewView")
+	ret0, _ := ret[0].(merkledb.TrieView)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// NewView indicates an expected call of NewView.
+func (mr *MockChainMockRecorder) NewView() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockChain)(nil).NewView))
+}
+
 // PutCurrentDelegator mocks base method.
 func (m *MockChain) PutCurrentDelegator(arg0 *Staker) {
 	m.ctrl.T.Helper()
@@ -835,6 +851,21 @@ func (mr *MockDiffMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), arg0)
 }
 
+// NewView mocks base method.
+func (m *MockDiff) NewView() (merkledb.TrieView, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NewView")
+	ret0, _ := ret[0].(merkledb.TrieView)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// NewView indicates an expected call of NewView.
+func (mr *MockDiffMockRecorder) NewView() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockDiff)(nil).NewView))
+}
+
 // PutCurrentDelegator mocks base method.
 func (m *MockDiff) PutCurrentDelegator(arg0 *Staker) {
 	m.ctrl.T.Helper()
@@ -1512,6 +1543,21 @@ func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 interface{}) *gomock.Call
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0, arg1)
 }
 
+// NewView mocks base method.
+func (m *MockState) NewView() (merkledb.TrieView, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NewView")
+	ret0, _ := ret[0].(merkledb.TrieView)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// NewView indicates an expected call of NewView.
+func (mr *MockStateMockRecorder) NewView() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockState)(nil).NewView))
+}
+
 // PutCurrentDelegator mocks base method.
 func (m *MockState) PutCurrentDelegator(arg0 *Staker) {
 	m.ctrl.T.Helper()
diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go
index 19bc1284b65b..90be03bf1727 100644
--- a/vms/platformvm/state/state.go
+++ b/vms/platformvm/state/state.go
@@ -96,6 +96,9 @@ type Chain interface {
 	avax.UTXOGetter
 	avax.UTXODeleter
 
+	// Returns a view that contains the merkleized portion of the state.
+	NewView() (merkledb.TrieView, error)
+
 	GetTimestamp() time.Time
 	SetTimestamp(tm time.Time)
 
@@ -986,7 +989,7 @@ func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) er
 }
 
 // Load pulls data previously stored on disk that is expected to be in memory.
-func (s *state) load(hasSynced bool) error {
+func (s *state) load() error {
 	// load last accepted block
 	lastAcceptedBlkIDBytes, err := s.singletonDB.Get(lastAcceptedBlockIDKey)
 	if err != nil {
@@ -1000,14 +1003,14 @@
 	s.SetLastAccepted(lastAcceptedBlkID)
 	s.latestCommittedLastAcceptedBlkID = lastAcceptedBlkID
 
-	return utils.Err(
+	err = utils.Err(
 		s.loadMerkleMetadata(),
 		s.loadCurrentStakers(),
 		s.loadPendingStakers(),
 		s.initValidatorSets(),
-
-		s.logMerkleRoot(!hasSynced), // we already logged if sync has happened
 	)
+	s.logMerkleRoot() // log the merkle root of the state we just loaded
+	return err
 }
 
 // Loads the chain time and last accepted block ID from disk
@@ -1216,7 +1219,7 @@ func (s *state) sync(genesis []byte) error {
 		}
 	}
 
-	return s.load(shouldInit)
+	return s.load()
 }
 
 // Creates a genesis from [genesisBytes] and initializes [ms] with it. 
@@ -1533,7 +1536,11 @@ func (s *state) processPendingStakers() (map[ids.ID]*stakersData, error) { return output, nil } -func (s *state) writeMerkleState(currentData, pendingData map[ids.ID]*stakersData) error { +func (s *state) NewView() (merkledb.TrieView, error) { + return s.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{}) +} + +func (s *state) getMerkleChanges(currentData, pendingData map[ids.ID]*stakersData) ([]database.BatchOp, error) { batchOps := make([]database.BatchOp, 0) err := utils.Err( s.writeMetadata(&batchOps), @@ -1546,23 +1553,28 @@ func (s *state) writeMerkleState(currentData, pendingData map[ids.ID]*stakersDat s.writeDelegateeRewards(&batchOps), s.writeUTXOs(&batchOps), ) + + return batchOps, err +} + +func (s *state) writeMerkleState(currentData, pendingData map[ids.ID]*stakersData) error { + changes, err := s.getMerkleChanges(currentData, pendingData) if err != nil { return err } - if len(batchOps) == 0 { - // nothing to commit - return nil - } - - view, err := s.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{BatchOps: batchOps}) + view, err := s.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{ + BatchOps: changes, + }) if err != nil { - return fmt.Errorf("failed creating merkleDB view: %w", err) + return err } - if err := view.CommitToDB(context.TODO()); err != nil { - return fmt.Errorf("failed committing merkleDB view: %w", err) + + if err := view.CommitToDB(context.Background()); err != nil { + return err } - return s.logMerkleRoot(len(batchOps) != 0) + s.logMerkleRoot() + return nil } func (*state) writePendingStakers(batchOps *[]database.BatchOp, pendingData map[ids.ID]*stakersData) error { @@ -1812,9 +1824,7 @@ func (s *state) writeMetadata(batchOps *[]database.BatchOp) error { // TODO: Consider if it should be for subnetID, supply := range s.modifiedSupplies { - supply := supply delete(s.modifiedSupplies, subnetID) // clear up s.supplies to avoid potential double commits - key := merkleSuppliesKey(subnetID) *batchOps = append(*batchOps, database.BatchOp{ Key: key, @@ -1909,38 +1919,26 @@ func (s *state) updateValidatorSet( return nil } -func (s *state) logMerkleRoot(hasChanges bool) error { +func (s *state) logMerkleRoot() { // get current Height blk, err := s.GetStatelessBlock(s.GetLastAccepted()) if err != nil { // may happen in tests. 
Let's just skip - return nil + s.ctx.Log.Error("failed to get last accepted block", zap.Error(err)) + return } - if !hasChanges { - s.ctx.Log.Info("merkle root", - zap.Uint64("height", blk.Height()), - zap.Stringer("blkID", blk.ID()), - zap.String("merkle root", "no changes to merkle state"), - ) - return nil - } - - view, err := s.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{}) + rootID, err := s.merkleDB.GetMerkleRoot(context.Background()) if err != nil { - return fmt.Errorf("failed creating merkleDB view: %w", err) - } - root, err := view.GetMerkleRoot(context.TODO()) - if err != nil { - return fmt.Errorf("failed pulling merkle root: %w", err) + s.ctx.Log.Error("failed to get merkle root", zap.Error(err)) + return } s.ctx.Log.Info("merkle root", zap.Uint64("height", blk.Height()), zap.Stringer("blkID", blk.ID()), - zap.String("merkle root", root.String()), + zap.Stringer("merkle root", rootID), ) - return nil } func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Duration, lastUpdated time.Time, err error) { From 0b875aba4ade6cc50c0fd88809abc4d0855b8e1a Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 5 Dec 2023 10:27:03 -0500 Subject: [PATCH 114/132] P-Chain merkledb -- include txs in merkleized state (#2398) --- vms/platformvm/state/state.go | 12 +++++------- vms/platformvm/state/state_helpers.go | 7 +++++++ vms/platformvm/state/state_helpers_test.go | 10 ++++++++++ 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 90be03bf1727..ed2598ce974a 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -63,7 +63,7 @@ var ( merkleSingletonPrefix = []byte{0x01} merkleBlockPrefix = []byte{0x02} merkleBlockIDsPrefix = []byte{0x03} - merkleTxPrefix = []byte{0x04} + merkleTxsPrefix = []byte{0x04} merkleIndexUTXOsPrefix = []byte{0x05} // to serve UTXOIDs(addr) merkleUptimesPrefix = []byte{0x06} // locally measured uptimes merkleWeightDiffPrefix = []byte{0x07} // non-merkleized validators weight diff. TODO: should we merkleize them? @@ -277,7 +277,6 @@ type state struct { // FIND a way to reduce use of these. 
No use in verification of addedTxs // a limited windows to support APIs addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} - txDB database.Database indexedUTXOsDB database.Database @@ -365,7 +364,6 @@ func newState( singletonDB = prefixdb.New(merkleSingletonPrefix, baseDB) blockDB = prefixdb.New(merkleBlockPrefix, baseDB) blockIDsDB = prefixdb.New(merkleBlockIDsPrefix, baseDB) - txDB = prefixdb.New(merkleTxPrefix, baseDB) indexedUTXOsDB = prefixdb.New(merkleIndexUTXOsPrefix, baseDB) localUptimesDB = prefixdb.New(merkleUptimesPrefix, baseDB) flatValidatorWeightDiffsDB = prefixdb.New(merkleWeightDiffPrefix, baseDB) @@ -425,7 +423,6 @@ func newState( blockIDDB: blockIDsDB, addedTxs: make(map[ids.ID]*txAndStatus), - txDB: txDB, indexedUTXOsDB: indexedUTXOsDB, @@ -640,7 +637,8 @@ func (s *state) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { return tx.tx, tx.status, nil } - txBytes, err := s.txDB.Get(txID[:]) + key := merkleTxKey(txID) + txBytes, err := s.merkleDB.Get(key) if err != nil { return nil, status.Unknown, err } @@ -1189,7 +1187,6 @@ func (s *state) Close() error { s.flatValidatorPublicKeyDiffsDB.Close(), s.localUptimesDB.Close(), s.indexedUTXOsDB.Close(), - s.txDB.Close(), s.blockDB.Close(), s.blockIDDB.Close(), s.merkleDB.Close(), @@ -1635,7 +1632,8 @@ func (s *state) writeTxs() error { // Note: Evict is used rather than Put here because stx may end up // referencing additional data (because of shared byte slices) that // would not be properly accounted for in the cache sizing. - if err := s.txDB.Put(txID[:], txBytes); err != nil { + key := merkleTxKey(txID) + if err := s.merkleDB.Put(key, txBytes); err != nil { return fmt.Errorf("failed to add tx: %w", err) } } diff --git a/vms/platformvm/state/state_helpers.go b/vms/platformvm/state/state_helpers.go index cf8b165e2698..91532169c663 100644 --- a/vms/platformvm/state/state_helpers.go +++ b/vms/platformvm/state/state_helpers.go @@ -128,3 +128,10 @@ func merkleSubnetOwnersKey(subnetID ids.ID) []byte { copy(key[len(delegateeRewardsPrefix):], subnetID[:]) return key } + +func merkleTxKey(txID ids.ID) []byte { + key := make([]byte, len(merkleTxsPrefix)+ids.IDLen) + copy(key, merkleTxsPrefix) + copy(key[len(merkleTxsPrefix):], txID[:]) + return key +} diff --git a/vms/platformvm/state/state_helpers_test.go b/vms/platformvm/state/state_helpers_test.go index 00547c9c8d93..1905c8f782f9 100644 --- a/vms/platformvm/state/state_helpers_test.go +++ b/vms/platformvm/state/state_helpers_test.go @@ -140,3 +140,13 @@ func TestDelegateeRewardsKey(t *testing.T) { require.Equal(nodeID[:], key[len(prefix):len(prefix)+len(nodeID[:])]) require.Equal(subnetID[:], key[len(prefix)+len(nodeID[:]):]) } + +func TestMerkleTxKey(t *testing.T) { + require := require.New(t) + txID := ids.GenerateTestID() + + key := merkleTxKey(txID) + require.Len(key, len(merkleTxsPrefix)+len(txID[:])) + require.Equal(merkleTxsPrefix, key[:len(merkleTxsPrefix)]) + require.Equal(txID[:], key[len(merkleTxsPrefix):]) +} From a7432268cdfc441f1c4374fcf05227ebc509f7d5 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 11:39:01 -0500 Subject: [PATCH 115/132] nit --- vms/platformvm/state/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 16da372c94b0..44290a2a34df 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1054,7 +1054,7 @@ func (s *state) loadCurrentStakers() error { tx, err := txs.Parse(txs.GenesisCodec, data.TxBytes) if 
err != nil { - return fmt.Errorf("failed to parsing current stakerTx: %w", err) + return fmt.Errorf("failed to parse current stakerTx: %w", err) } stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { From ddb34e3f1702f8bc3ea2f532e2cb4e110ac8cc61 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 11:39:52 -0500 Subject: [PATCH 116/132] remove unneeded var --- vms/platformvm/state/state.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 44290a2a34df..758b6c3a61f8 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1041,10 +1041,7 @@ func (s *state) loadCurrentStakers() error { // TODO ABENEGIA: Check missing metadata s.currentStakers = newBaseStakers() - prefix := make([]byte, len(currentStakersSectionPrefix)) - copy(prefix, currentStakersSectionPrefix) - - iter := s.merkleDB.NewIteratorWithPrefix(prefix) + iter := s.merkleDB.NewIteratorWithPrefix(currentStakersSectionPrefix) defer iter.Release() for iter.Next() { data := &stakersData{} From 798debf7a10b372dd8d050cf6a9fd7dfc33a3ae0 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 11:40:18 -0500 Subject: [PATCH 117/132] nit --- vms/platformvm/state/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 758b6c3a61f8..906c101c0c95 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1044,7 +1044,7 @@ func (s *state) loadCurrentStakers() error { iter := s.merkleDB.NewIteratorWithPrefix(currentStakersSectionPrefix) defer iter.Release() for iter.Next() { - data := &stakersData{} + var data stakersData if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil { return fmt.Errorf("failed to deserialize current stakers data: %w", err) } From 1357e389487c04c9c137865f985af91d9438f7be Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 11:56:30 -0500 Subject: [PATCH 118/132] update comments --- vms/platformvm/state/state.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 906c101c0c95..445e3828b929 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -916,7 +916,7 @@ func (s *state) ApplyValidatorPublicKeyDiffs( return diffIter.Error() } -// Loads the state from [genesisBls] and [genesis] into [ms]. +// Loads the state from [genesisBls] and [genesis] into [s]. func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error { genesisBlkID := genesisBlk.ID() s.SetLastAccepted(genesisBlkID) @@ -1016,7 +1016,7 @@ func (s *state) load() error { } // Loads the chain time and last accepted block ID from disk -// and populates them in [ms]. +// and populates them in [s]. func (s *state) loadMerkleMetadata() error { // load chain time chainTimeBytes, err := s.merkleDB.Get(merkleChainTimeKey) @@ -1036,7 +1036,7 @@ func (s *state) loadMerkleMetadata() error { return nil } -// Loads current stakes from disk and populates them in [ms]. +// Loads current stakes from disk and populates them in [s]. func (s *state) loadCurrentStakers() error { // TODO ABENEGIA: Check missing metadata s.currentStakers = newBaseStakers() @@ -1195,8 +1195,8 @@ func (s *state) Close() error { ) } -// If [ms] isn't initialized, initializes it with [genesis]. -// Then loads [ms] from disk. +// If [s] isn't initialized, initializes it with [genesis]. 
+// Then loads [s] from disk. func (s *state) sync(genesis []byte) error { shouldInit, err := s.shouldInit() if err != nil { @@ -1220,7 +1220,7 @@ func (s *state) sync(genesis []byte) error { return s.load() } -// Creates a genesis from [genesisBytes] and initializes [ms] with it. +// Creates a genesis from [genesisBytes] and initializes [s] with it. func (s *state) init(genesisBytes []byte) error { // Create the genesis block and save it as being accepted (We don't do // genesisBlock.Accept() because then it'd look for genesisBlock's From 6bce2f1c93b19d7bedcfa78bf0f2ec81688f3979 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 12:03:12 -0500 Subject: [PATCH 119/132] remove unused code --- vms/platformvm/state/metadata_validator.go | 38 ------ .../state/metadata_validator_test.go | 129 ------------------ 2 files changed, 167 deletions(-) diff --git a/vms/platformvm/state/metadata_validator.go b/vms/platformvm/state/metadata_validator.go index 6b839ccad801..6bf4217c50d8 100644 --- a/vms/platformvm/state/metadata_validator.go +++ b/vms/platformvm/state/metadata_validator.go @@ -37,44 +37,6 @@ type validatorMetadata struct { lastUpdated time.Time } -// Permissioned validators originally wrote their values as nil. -// With Banff we wrote the potential reward. -// With Cortina we wrote the potential reward with the potential delegatee reward. -// We now write the uptime, reward, and delegatee reward together. -func parseValidatorMetadata(bytes []byte, metadata *validatorMetadata) error { - switch len(bytes) { - case 0: - // nothing was stored - - case database.Uint64Size: - // only potential reward was stored - var err error - metadata.PotentialReward, err = database.ParseUInt64(bytes) - if err != nil { - return err - } - - case preDelegateeRewardSize: - // potential reward and uptime was stored but potential delegatee reward - // was not - tmp := preDelegateeRewardMetadata{} - if _, err := metadataCodec.Unmarshal(bytes, &tmp); err != nil { - return err - } - - metadata.UpDuration = tmp.UpDuration - metadata.LastUpdated = tmp.LastUpdated - metadata.PotentialReward = tmp.PotentialReward - default: - // everything was stored - if _, err := metadataCodec.Unmarshal(bytes, metadata); err != nil { - return err - } - } - metadata.lastUpdated = time.Unix(int64(metadata.LastUpdated), 0) - return nil -} - type validatorState interface { // LoadValidatorMetadata sets the [metadata] of [vdrID] on [subnetID]. 
// GetUptime and SetUptime will return an error if the [vdrID] and diff --git a/vms/platformvm/state/metadata_validator_test.go b/vms/platformvm/state/metadata_validator_test.go index 68f18e62bd72..64e5d8f264c9 100644 --- a/vms/platformvm/state/metadata_validator_test.go +++ b/vms/platformvm/state/metadata_validator_test.go @@ -9,11 +9,9 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/wrappers" ) func TestValidatorUptimes(t *testing.T) { @@ -169,130 +167,3 @@ func TestValidatorDelegateeRewards(t *testing.T) { _, _, err = state.GetUptime(nodeID, subnetID) require.ErrorIs(err, database.ErrNotFound) } - -func TestParseValidatorMetadata(t *testing.T) { - type test struct { - name string - bytes []byte - expected *validatorMetadata - expectedErr error - } - tests := []test{ - { - name: "nil", - bytes: nil, - expected: &validatorMetadata{ - lastUpdated: time.Unix(0, 0), - }, - expectedErr: nil, - }, - { - name: "nil", - bytes: []byte{}, - expected: &validatorMetadata{ - lastUpdated: time.Unix(0, 0), - }, - expectedErr: nil, - }, - { - name: "potential reward only", - bytes: []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, - }, - expected: &validatorMetadata{ - PotentialReward: 100000, - lastUpdated: time.Unix(0, 0), - }, - expectedErr: nil, - }, - { - name: "uptime + potential reward", - bytes: []byte{ - // codec version - 0x00, 0x00, - // up duration - 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, - // last updated - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, - // potential reward - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, - }, - expected: &validatorMetadata{ - UpDuration: 6000000, - LastUpdated: 900000, - PotentialReward: 100000, - lastUpdated: time.Unix(900000, 0), - }, - expectedErr: nil, - }, - { - name: "uptime + potential reward + potential delegatee reward", - bytes: []byte{ - // codec version - 0x00, 0x00, - // up duration - 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, - // last updated - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, - // potential reward - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, - // potential delegatee reward - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, - }, - expected: &validatorMetadata{ - UpDuration: 6000000, - LastUpdated: 900000, - PotentialReward: 100000, - PotentialDelegateeReward: 20000, - lastUpdated: time.Unix(900000, 0), - }, - expectedErr: nil, - }, - { - name: "invalid codec version", - bytes: []byte{ - // codec version - 0x00, 0x01, - // up duration - 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, - // last updated - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, - // potential reward - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, - // potential delegatee reward - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, - }, - expected: nil, - expectedErr: codec.ErrUnknownVersion, - }, - { - name: "short byte len", - bytes: []byte{ - // codec version - 0x00, 0x00, - // up duration - 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, - // last updated - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, - // potential reward - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, - // potential delegatee reward - 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, - }, - expected: nil, - expectedErr: wrappers.ErrInsufficientLength, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - 
var metadata validatorMetadata - err := parseValidatorMetadata(tt.bytes, &metadata) - require.ErrorIs(err, tt.expectedErr) - if tt.expectedErr != nil { - return - } - require.Equal(tt.expected, &metadata) - }) - } -} From 9637244bc83737aa7255bb310dfbf6ab178e8dc1 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 12:14:30 -0500 Subject: [PATCH 120/132] nits --- vms/platformvm/state/state.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 445e3828b929..84254cc1a290 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1036,7 +1036,7 @@ func (s *state) loadMerkleMetadata() error { return nil } -// Loads current stakes from disk and populates them in [s]. +// Loads current stakers from disk and populates them in [s]. func (s *state) loadCurrentStakers() error { // TODO ABENEGIA: Check missing metadata s.currentStakers = newBaseStakers() @@ -1062,13 +1062,13 @@ func (s *state) loadCurrentStakers() error { if err != nil { return err } + + validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) if staker.Priority.IsValidator() { // TODO: why not PutValidator/PutDelegator?? - validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) validator.validator = staker s.currentStakers.stakers.ReplaceOrInsert(staker) } else { - validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) if validator.delegators == nil { validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } @@ -1325,10 +1325,10 @@ func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { } func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ids.ID]*stakersData) error { - for stakerTxID, data := range currentData { + for stakerTxID, stakerData := range currentData { key := merkleCurrentStakersKey(stakerTxID) - if data.TxBytes == nil { + if stakerData.TxBytes == nil { *batchOps = append(*batchOps, database.BatchOp{ Key: key, Delete: true, @@ -1336,7 +1336,7 @@ func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ continue } - dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, data) + dataBytes, err := txs.GenesisCodec.Marshal(txs.Version, stakerData) if err != nil { return fmt.Errorf("failed to serialize current stakers data, stakerTxID %v: %w", stakerTxID, err) } From c27f7938e3989508451d68965c9559fdb466333f Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 12:21:52 -0500 Subject: [PATCH 121/132] nit --- vms/platformvm/state/state.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 84254cc1a290..eb0e1acfc10b 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1953,13 +1953,12 @@ func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Du uptimeBytes, err := s.localUptimesDB.Get(key) switch err { case nil: - upTm := &uptimes{} - if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, upTm); err != nil { + var uptime uptimes + if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, uptime); err != nil { return 0, time.Time{}, err } - upTm.lastUpdated = time.Unix(int64(upTm.LastUpdated), 0) - return upTm.Duration, upTm.lastUpdated, nil - + uptime.lastUpdated = time.Unix(int64(uptime.LastUpdated), 0) + return uptime.Duration, uptime.lastUpdated, nil case database.ErrNotFound: // no local data for 
this staker uptime return 0, time.Time{}, database.ErrNotFound From 9bb088ed8420b0064d581c06a785f79bac0e545c Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 12:29:27 -0500 Subject: [PATCH 122/132] appease linter; add missing reference --- vms/platformvm/state/metadata_validator.go | 13 ------------- vms/platformvm/state/state.go | 2 +- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/vms/platformvm/state/metadata_validator.go b/vms/platformvm/state/metadata_validator.go index 6bf4217c50d8..9276299f31c8 100644 --- a/vms/platformvm/state/metadata_validator.go +++ b/vms/platformvm/state/metadata_validator.go @@ -10,23 +10,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" ) -// preDelegateeRewardSize is the size of codec marshalling -// [preDelegateeRewardMetadata]. -// -// CodecVersionLen + UpDurationLen + LastUpdatedLen + PotentialRewardLen -const preDelegateeRewardSize = wrappers.ShortLen + 3*wrappers.LongLen - var _ validatorState = (*metadata)(nil) -type preDelegateeRewardMetadata struct { - UpDuration time.Duration `v0:"true"` - LastUpdated uint64 `v0:"true"` // Unix time in seconds - PotentialReward uint64 `v0:"true"` -} - type validatorMetadata struct { UpDuration time.Duration `v0:"true"` LastUpdated uint64 `v0:"true"` // Unix time in seconds diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index eb0e1acfc10b..0f88ac07897e 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1954,7 +1954,7 @@ func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Du switch err { case nil: var uptime uptimes - if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, uptime); err != nil { + if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, &uptime); err != nil { return 0, time.Time{}, err } uptime.lastUpdated = time.Unix(int64(uptime.LastUpdated), 0) From 1b5d590f97b14a067939344291c4b7dd1b95b456 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 12:46:37 -0500 Subject: [PATCH 123/132] remove dead code --- vms/platformvm/state/metadata_codec.go | 28 --- vms/platformvm/state/metadata_validator.go | 212 ------------------ .../state/metadata_validator_test.go | 169 -------------- 3 files changed, 409 deletions(-) delete mode 100644 vms/platformvm/state/metadata_codec.go delete mode 100644 vms/platformvm/state/metadata_validator.go delete mode 100644 vms/platformvm/state/metadata_validator_test.go diff --git a/vms/platformvm/state/metadata_codec.go b/vms/platformvm/state/metadata_codec.go deleted file mode 100644 index 6240bbd879ca..000000000000 --- a/vms/platformvm/state/metadata_codec.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package state - -import ( - "math" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" -) - -const ( - v0tag = "v0" - v0 = uint16(0) -) - -var metadataCodec codec.Manager - -func init() { - c := linearcodec.New([]string{v0tag}, math.MaxInt32) - metadataCodec = codec.NewManager(math.MaxInt32) - - err := metadataCodec.RegisterCodec(v0, c) - if err != nil { - panic(err) - } -} diff --git a/vms/platformvm/state/metadata_validator.go b/vms/platformvm/state/metadata_validator.go deleted file mode 100644 index 9276299f31c8..000000000000 --- a/vms/platformvm/state/metadata_validator.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "time" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/set" -) - -var _ validatorState = (*metadata)(nil) - -type validatorMetadata struct { - UpDuration time.Duration `v0:"true"` - LastUpdated uint64 `v0:"true"` // Unix time in seconds - PotentialReward uint64 `v0:"true"` - PotentialDelegateeReward uint64 `v0:"true"` - - txID ids.ID - lastUpdated time.Time -} - -type validatorState interface { - // LoadValidatorMetadata sets the [metadata] of [vdrID] on [subnetID]. - // GetUptime and SetUptime will return an error if the [vdrID] and - // [subnetID] hasn't been loaded. This call will not result in a write to - // disk. - LoadValidatorMetadata( - vdrID ids.NodeID, - subnetID ids.ID, - metadata *validatorMetadata, - ) - - // GetUptime returns the current uptime measurements of [vdrID] on - // [subnetID]. - GetUptime( - vdrID ids.NodeID, - subnetID ids.ID, - ) (upDuration time.Duration, lastUpdated time.Time, err error) - - // SetUptime updates the uptime measurements of [vdrID] on [subnetID]. - // Unless these measurements are deleted first, the next call to - // WriteUptimes will write this update to disk. - SetUptime( - vdrID ids.NodeID, - subnetID ids.ID, - upDuration time.Duration, - lastUpdated time.Time, - ) error - - // GetDelegateeReward returns the current rewards accrued to [vdrID] on - // [subnetID]. - GetDelegateeReward( - subnetID ids.ID, - vdrID ids.NodeID, - ) (amount uint64, err error) - - // SetDelegateeReward updates the rewards accrued to [vdrID] on [subnetID]. - // Unless these measurements are deleted first, the next call to - // WriteUptimes will write this update to disk. - SetDelegateeReward( - subnetID ids.ID, - vdrID ids.NodeID, - amount uint64, - ) error - - // DeleteValidatorMetadata removes in-memory references to the metadata of - // [vdrID] on [subnetID]. If there were staged updates from a prior call to - // SetUptime or SetDelegateeReward, the updates will be dropped. This call - // will not result in a write to disk. - DeleteValidatorMetadata(vdrID ids.NodeID, subnetID ids.ID) - - // WriteValidatorMetadata writes all staged updates from prior calls to - // SetUptime or SetDelegateeReward. 
- WriteValidatorMetadata( - dbPrimary database.KeyValueWriter, - dbSubnet database.KeyValueWriter, - ) error -} - -type metadata struct { - metadata map[ids.NodeID]map[ids.ID]*validatorMetadata // vdrID -> subnetID -> metadata - // updatedMetadata tracks the updates since WriteValidatorMetadata was last called - updatedMetadata map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs -} - -func newValidatorState() validatorState { - return &metadata{ - metadata: make(map[ids.NodeID]map[ids.ID]*validatorMetadata), - updatedMetadata: make(map[ids.NodeID]set.Set[ids.ID]), - } -} - -func (m *metadata) LoadValidatorMetadata( - vdrID ids.NodeID, - subnetID ids.ID, - uptime *validatorMetadata, -) { - subnetMetadata, ok := m.metadata[vdrID] - if !ok { - subnetMetadata = make(map[ids.ID]*validatorMetadata) - m.metadata[vdrID] = subnetMetadata - } - subnetMetadata[subnetID] = uptime -} - -func (m *metadata) GetUptime( - vdrID ids.NodeID, - subnetID ids.ID, -) (time.Duration, time.Time, error) { - metadata, exists := m.metadata[vdrID][subnetID] - if !exists { - return 0, time.Time{}, database.ErrNotFound - } - return metadata.UpDuration, metadata.lastUpdated, nil -} - -func (m *metadata) SetUptime( - vdrID ids.NodeID, - subnetID ids.ID, - upDuration time.Duration, - lastUpdated time.Time, -) error { - metadata, exists := m.metadata[vdrID][subnetID] - if !exists { - return database.ErrNotFound - } - metadata.UpDuration = upDuration - metadata.lastUpdated = lastUpdated - - m.addUpdatedMetadata(vdrID, subnetID) - return nil -} - -func (m *metadata) GetDelegateeReward( - subnetID ids.ID, - vdrID ids.NodeID, -) (uint64, error) { - metadata, exists := m.metadata[vdrID][subnetID] - if !exists { - return 0, database.ErrNotFound - } - return metadata.PotentialDelegateeReward, nil -} - -func (m *metadata) SetDelegateeReward( - subnetID ids.ID, - vdrID ids.NodeID, - amount uint64, -) error { - metadata, exists := m.metadata[vdrID][subnetID] - if !exists { - return database.ErrNotFound - } - metadata.PotentialDelegateeReward = amount - - m.addUpdatedMetadata(vdrID, subnetID) - return nil -} - -func (m *metadata) DeleteValidatorMetadata(vdrID ids.NodeID, subnetID ids.ID) { - subnetMetadata := m.metadata[vdrID] - delete(subnetMetadata, subnetID) - if len(subnetMetadata) == 0 { - delete(m.metadata, vdrID) - } - - subnetUpdatedMetadata := m.updatedMetadata[vdrID] - subnetUpdatedMetadata.Remove(subnetID) - if subnetUpdatedMetadata.Len() == 0 { - delete(m.updatedMetadata, vdrID) - } -} - -func (m *metadata) WriteValidatorMetadata( - dbPrimary database.KeyValueWriter, - dbSubnet database.KeyValueWriter, -) error { - for vdrID, updatedSubnets := range m.updatedMetadata { - for subnetID := range updatedSubnets { - metadata := m.metadata[vdrID][subnetID] - metadata.LastUpdated = uint64(metadata.lastUpdated.Unix()) - - metadataBytes, err := metadataCodec.Marshal(v0, metadata) - if err != nil { - return err - } - db := dbSubnet - if subnetID == constants.PrimaryNetworkID { - db = dbPrimary - } - if err := db.Put(metadata.txID[:], metadataBytes); err != nil { - return err - } - } - delete(m.updatedMetadata, vdrID) - } - return nil -} - -func (m *metadata) addUpdatedMetadata(vdrID ids.NodeID, subnetID ids.ID) { - updatedSubnetMetadata, ok := m.updatedMetadata[vdrID] - if !ok { - updatedSubnetMetadata = set.Set[ids.ID]{} - m.updatedMetadata[vdrID] = updatedSubnetMetadata - } - updatedSubnetMetadata.Add(subnetID) -} diff --git a/vms/platformvm/state/metadata_validator_test.go b/vms/platformvm/state/metadata_validator_test.go 
deleted file mode 100644 index 64e5d8f264c9..000000000000 --- a/vms/platformvm/state/metadata_validator_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" -) - -func TestValidatorUptimes(t *testing.T) { - require := require.New(t) - state := newValidatorState() - - // get non-existent uptime - nodeID := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() - _, _, err := state.GetUptime(nodeID, subnetID) - require.ErrorIs(err, database.ErrNotFound) - - // set non-existent uptime - err = state.SetUptime(nodeID, subnetID, 1, time.Now()) - require.ErrorIs(err, database.ErrNotFound) - - testMetadata := &validatorMetadata{ - UpDuration: time.Hour, - lastUpdated: time.Now(), - } - // load uptime - state.LoadValidatorMetadata(nodeID, subnetID, testMetadata) - - // get uptime - upDuration, lastUpdated, err := state.GetUptime(nodeID, subnetID) - require.NoError(err) - require.Equal(testMetadata.UpDuration, upDuration) - require.Equal(testMetadata.lastUpdated, lastUpdated) - - // set uptime - newUpDuration := testMetadata.UpDuration + 1 - newLastUpdated := testMetadata.lastUpdated.Add(time.Hour) - require.NoError(state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated)) - - // get new uptime - upDuration, lastUpdated, err = state.GetUptime(nodeID, subnetID) - require.NoError(err) - require.Equal(newUpDuration, upDuration) - require.Equal(newLastUpdated, lastUpdated) - - // load uptime changes uptimes - newTestMetadata := &validatorMetadata{ - UpDuration: testMetadata.UpDuration + time.Hour, - lastUpdated: testMetadata.lastUpdated.Add(time.Hour), - } - state.LoadValidatorMetadata(nodeID, subnetID, newTestMetadata) - - // get new uptime - upDuration, lastUpdated, err = state.GetUptime(nodeID, subnetID) - require.NoError(err) - require.Equal(newTestMetadata.UpDuration, upDuration) - require.Equal(newTestMetadata.lastUpdated, lastUpdated) - - // delete uptime - state.DeleteValidatorMetadata(nodeID, subnetID) - - // get deleted uptime - _, _, err = state.GetUptime(nodeID, subnetID) - require.ErrorIs(err, database.ErrNotFound) -} - -func TestWriteValidatorMetadata(t *testing.T) { - require := require.New(t) - state := newValidatorState() - - primaryDB := memdb.New() - subnetDB := memdb.New() - // write empty uptimes - require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) - - // load uptime - nodeID := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() - testUptimeReward := &validatorMetadata{ - UpDuration: time.Hour, - lastUpdated: time.Now(), - PotentialReward: 100, - txID: ids.GenerateTestID(), - } - state.LoadValidatorMetadata(nodeID, subnetID, testUptimeReward) - - // write state, should not reflect to DB yet - require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) - require.False(primaryDB.Has(testUptimeReward.txID[:])) - require.False(subnetDB.Has(testUptimeReward.txID[:])) - - // get uptime should still return the loaded value - upDuration, lastUpdated, err := state.GetUptime(nodeID, subnetID) - require.NoError(err) - require.Equal(testUptimeReward.UpDuration, upDuration) - require.Equal(testUptimeReward.lastUpdated, lastUpdated) - - // update uptimes - newUpDuration := testUptimeReward.UpDuration + 1 - newLastUpdated := 
testUptimeReward.lastUpdated.Add(time.Hour) - require.NoError(state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated)) - - // write uptimes, should reflect to subnet DB - require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) - require.False(primaryDB.Has(testUptimeReward.txID[:])) - require.True(subnetDB.Has(testUptimeReward.txID[:])) -} - -func TestValidatorDelegateeRewards(t *testing.T) { - require := require.New(t) - state := newValidatorState() - - // get non-existent delegatee reward - nodeID := ids.GenerateTestNodeID() - subnetID := ids.GenerateTestID() - _, err := state.GetDelegateeReward(subnetID, nodeID) - require.ErrorIs(err, database.ErrNotFound) - - // set non-existent delegatee reward - err = state.SetDelegateeReward(subnetID, nodeID, 100000) - require.ErrorIs(err, database.ErrNotFound) - - testMetadata := &validatorMetadata{ - PotentialDelegateeReward: 100000, - } - // load delegatee reward - state.LoadValidatorMetadata(nodeID, subnetID, testMetadata) - - // get delegatee reward - delegateeReward, err := state.GetDelegateeReward(subnetID, nodeID) - require.NoError(err) - require.Equal(testMetadata.PotentialDelegateeReward, delegateeReward) - - // set delegatee reward - newDelegateeReward := testMetadata.PotentialDelegateeReward + 100000 - require.NoError(state.SetDelegateeReward(subnetID, nodeID, newDelegateeReward)) - - // get new delegatee reward - delegateeReward, err = state.GetDelegateeReward(subnetID, nodeID) - require.NoError(err) - require.Equal(newDelegateeReward, delegateeReward) - - // load delegatee reward changes - newTestMetadata := &validatorMetadata{ - PotentialDelegateeReward: testMetadata.PotentialDelegateeReward + 100000, - } - state.LoadValidatorMetadata(nodeID, subnetID, newTestMetadata) - - // get new delegatee reward - delegateeReward, err = state.GetDelegateeReward(subnetID, nodeID) - require.NoError(err) - require.Equal(newTestMetadata.PotentialDelegateeReward, delegateeReward) - - // delete delegatee reward - state.DeleteValidatorMetadata(nodeID, subnetID) - - // get deleted delegatee reward - _, _, err = state.GetUptime(nodeID, subnetID) - require.ErrorIs(err, database.ErrNotFound) -} From 35be4bba5047e87cb31e20bbc1a28d8b315b1449 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 12:48:35 -0500 Subject: [PATCH 124/132] comment --- vms/platformvm/state/state_helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/state_helpers.go b/vms/platformvm/state/state_helpers.go index 91532169c663..3dde278d18e9 100644 --- a/vms/platformvm/state/state_helpers.go +++ b/vms/platformvm/state/state_helpers.go @@ -19,7 +19,7 @@ type uptimes struct { } type stakersData struct { - TxBytes []byte `serialize:"true"` // nit signals remove + TxBytes []byte `serialize:"true"` // nil means the staker is removed PotentialReward uint64 `serialize:"true"` } From b6e52fb382e79b29ac7606717648f61c45e5f22c Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 12:49:36 -0500 Subject: [PATCH 125/132] nits --- vms/platformvm/state/state.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 0f88ac07897e..1a1a083addbc 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1470,9 +1470,7 @@ func (s *state) processCurrentStakers() ( } for _, staker := range validatorDiff.deletedDelegators { - txID := staker.TxID - - outputStakers[txID] = &stakersData{ + outputStakers[staker.TxID] = 
&stakersData{ TxBytes: nil, } if err := outputWeights[weightKey].Add(true, staker.Weight); err != nil { @@ -1524,8 +1522,7 @@ func (s *state) processPendingStakers() (map[ids.ID]*stakersData, error) { } for _, staker := range validatorDiff.deletedDelegators { - txID := staker.TxID - output[txID] = &stakersData{ + output[staker.TxID] = &stakersData{ TxBytes: nil, } } From f552616c2c5944cef224f341d504c1168010d430 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 13:25:13 -0500 Subject: [PATCH 126/132] rename uptimes to uptimeMetadata --- vms/platformvm/state/state.go | 10 +++++----- vms/platformvm/state/state_helpers.go | 3 +-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 1a1a083addbc..0ef47949a344 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -281,7 +281,7 @@ type state struct { indexedUTXOsDB database.Database // Node ID --> SubnetID --> Uptime of the node on the subnet - modifiedLocalUptimes map[ids.NodeID]map[ids.ID]*uptimes + modifiedLocalUptimes map[ids.NodeID]map[ids.ID]*uptimeMetadata localUptimesDB database.Database flatValidatorWeightDiffsDB database.Database @@ -426,7 +426,7 @@ func newState( indexedUTXOsDB: indexedUTXOsDB, - modifiedLocalUptimes: make(map[ids.NodeID]map[ids.ID]*uptimes), + modifiedLocalUptimes: make(map[ids.NodeID]map[ids.ID]*uptimeMetadata), localUptimesDB: localUptimesDB, flatValidatorWeightDiffsDB: flatValidatorWeightDiffsDB, @@ -1950,7 +1950,7 @@ func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Du uptimeBytes, err := s.localUptimesDB.Get(key) switch err { case nil: - var uptime uptimes + var uptime uptimeMetadata if _, err := txs.GenesisCodec.Unmarshal(uptimeBytes, &uptime); err != nil { return 0, time.Time{}, err } @@ -1967,10 +1967,10 @@ func (s *state) GetUptime(vdrID ids.NodeID, subnetID ids.ID) (upDuration time.Du func (s *state) SetUptime(vdrID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { updatedNodeUptimes, ok := s.modifiedLocalUptimes[vdrID] if !ok { - updatedNodeUptimes = make(map[ids.ID]*uptimes, 0) + updatedNodeUptimes = make(map[ids.ID]*uptimeMetadata, 0) s.modifiedLocalUptimes[vdrID] = updatedNodeUptimes } - updatedNodeUptimes[subnetID] = &uptimes{ + updatedNodeUptimes[subnetID] = &uptimeMetadata{ Duration: upDuration, LastUpdated: uint64(lastUpdated.Unix()), lastUpdated: lastUpdated, diff --git a/vms/platformvm/state/state_helpers.go b/vms/platformvm/state/state_helpers.go index 3dde278d18e9..848a26af780c 100644 --- a/vms/platformvm/state/state_helpers.go +++ b/vms/platformvm/state/state_helpers.go @@ -9,8 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -// helpers types to store data on merkleDB -type uptimes struct { +type uptimeMetadata struct { Duration time.Duration `serialize:"true"` LastUpdated uint64 `serialize:"true"` // Unix time in seconds From 346eba903693f89da55d5dcdec3cf9d50f608481 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 13:25:50 -0500 Subject: [PATCH 127/132] rename stakersData to stakingTxAndReward --- vms/platformvm/state/diff.go | 4 +-- vms/platformvm/state/state.go | 36 +++++++++++++-------------- vms/platformvm/state/state_helpers.go | 2 +- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index c19f782b65dd..ec0ac5fc6ec6 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -520,7 +520,7 @@ 
func (d *diff) getMerkleChanges() (merkledb.ViewChanges, error) { return merkledb.ViewChanges{}, err } - stakersDataBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stakersData{ + stakersDataBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stakingTxAndReward{ TxBytes: tx.Bytes(), PotentialReward: txIDAndReward.reward, }) @@ -579,7 +579,7 @@ func (d *diff) getMerkleChanges() (merkledb.ViewChanges, error) { return merkledb.ViewChanges{}, err } - stakersDataBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stakersData{ + stakersDataBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stakingTxAndReward{ TxBytes: tx.Bytes(), PotentialReward: txIDAndReward.reward, }) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 0ef47949a344..4b035b0ed117 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1044,7 +1044,7 @@ func (s *state) loadCurrentStakers() error { iter := s.merkleDB.NewIteratorWithPrefix(currentStakersSectionPrefix) defer iter.Release() for iter.Next() { - var data stakersData + var data stakingTxAndReward if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil { return fmt.Errorf("failed to deserialize current stakers data: %w", err) } @@ -1089,7 +1089,7 @@ func (s *state) loadPendingStakers() error { iter := s.merkleDB.NewIteratorWithPrefix(prefix) defer iter.Release() for iter.Next() { - var data stakersData + var data stakingTxAndReward if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), &data); err != nil { return fmt.Errorf("failed to deserialize pending stakers data: %w", err) } @@ -1324,7 +1324,7 @@ func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { return database.GetID(s.blockIDDB, key) } -func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ids.ID]*stakersData) error { +func (*state) writeCurrentStakers(batchOps *[]database.BatchOp, currentData map[ids.ID]*stakingTxAndReward) error { for stakerTxID, stakerData := range currentData { key := merkleCurrentStakersKey(stakerTxID) @@ -1377,14 +1377,14 @@ func (s *state) SetDelegateeReward(subnetID ids.ID, vdrID ids.NodeID, amount uin // DB Operations func (s *state) processCurrentStakers() ( - map[ids.ID]*stakersData, + map[ids.ID]*stakingTxAndReward, map[weightDiffKey]*ValidatorWeightDiff, map[ids.NodeID]*bls.PublicKey, map[weightDiffKey]*diffValidator, error, ) { var ( - outputStakers = make(map[ids.ID]*stakersData) + outputStakers = make(map[ids.ID]*stakingTxAndReward) outputWeights = make(map[weightDiffKey]*ValidatorWeightDiff) outputBlsKey = make(map[ids.NodeID]*bls.PublicKey) outputValSet = make(map[weightDiffKey]*diffValidator) @@ -1418,7 +1418,7 @@ func (s *state) processCurrentStakers() ( return nil, nil, nil, nil, fmt.Errorf("failed loading current validator tx, %w", err) } - outputStakers[txID] = &stakersData{ + outputStakers[txID] = &stakingTxAndReward{ TxBytes: tx.Bytes(), PotentialReward: potentialReward, } @@ -1438,7 +1438,7 @@ func (s *state) processCurrentStakers() ( blkKey = validatorDiff.validator.PublicKey ) - outputStakers[txID] = &stakersData{ + outputStakers[txID] = &stakingTxAndReward{ TxBytes: nil, } outputWeights[weightKey].Amount = weight @@ -1460,7 +1460,7 @@ func (s *state) processCurrentStakers() ( return nil, nil, nil, nil, fmt.Errorf("failed loading current delegator tx, %w", err) } - outputStakers[staker.TxID] = &stakersData{ + outputStakers[staker.TxID] = &stakingTxAndReward{ TxBytes: tx.Bytes(), PotentialReward: staker.PotentialReward, } @@ -1470,7 +1470,7 @@ func 
(s *state) processCurrentStakers() ( } for _, staker := range validatorDiff.deletedDelegators { - outputStakers[staker.TxID] = &stakersData{ + outputStakers[staker.TxID] = &stakingTxAndReward{ TxBytes: nil, } if err := outputWeights[weightKey].Add(true, staker.Weight); err != nil { @@ -1482,8 +1482,8 @@ func (s *state) processCurrentStakers() ( return outputStakers, outputWeights, outputBlsKey, outputValSet, nil } -func (s *state) processPendingStakers() (map[ids.ID]*stakersData, error) { - output := make(map[ids.ID]*stakersData) +func (s *state) processPendingStakers() (map[ids.ID]*stakingTxAndReward, error) { + output := make(map[ids.ID]*stakingTxAndReward) for subnetID, subnetValidatorDiffs := range s.pendingStakers.validatorDiffs { delete(s.pendingStakers.validatorDiffs, subnetID) for _, validatorDiff := range subnetValidatorDiffs { @@ -1496,13 +1496,13 @@ func (s *state) processPendingStakers() (map[ids.ID]*stakersData, error) { if err != nil { return nil, fmt.Errorf("failed loading pending validator tx, %w", err) } - output[txID] = &stakersData{ + output[txID] = &stakingTxAndReward{ TxBytes: tx.Bytes(), PotentialReward: 0, } case deleted: txID := validatorDiff.validator.TxID - output[txID] = &stakersData{ + output[txID] = &stakingTxAndReward{ TxBytes: nil, } } @@ -1515,14 +1515,14 @@ func (s *state) processPendingStakers() (map[ids.ID]*stakersData, error) { if err != nil { return nil, fmt.Errorf("failed loading pending delegator tx, %w", err) } - output[staker.TxID] = &stakersData{ + output[staker.TxID] = &stakingTxAndReward{ TxBytes: tx.Bytes(), PotentialReward: 0, } } for _, staker := range validatorDiff.deletedDelegators { - output[staker.TxID] = &stakersData{ + output[staker.TxID] = &stakingTxAndReward{ TxBytes: nil, } } @@ -1535,7 +1535,7 @@ func (s *state) NewView() (merkledb.TrieView, error) { return s.merkleDB.NewView(context.TODO(), merkledb.ViewChanges{}) } -func (s *state) getMerkleChanges(currentData, pendingData map[ids.ID]*stakersData) ([]database.BatchOp, error) { +func (s *state) getMerkleChanges(currentData, pendingData map[ids.ID]*stakingTxAndReward) ([]database.BatchOp, error) { batchOps := make([]database.BatchOp, 0) err := utils.Err( s.writeMetadata(&batchOps), @@ -1552,7 +1552,7 @@ func (s *state) getMerkleChanges(currentData, pendingData map[ids.ID]*stakersDat return batchOps, err } -func (s *state) writeMerkleState(currentData, pendingData map[ids.ID]*stakersData) error { +func (s *state) writeMerkleState(currentData, pendingData map[ids.ID]*stakingTxAndReward) error { changes, err := s.getMerkleChanges(currentData, pendingData) if err != nil { return err @@ -1572,7 +1572,7 @@ func (s *state) writeMerkleState(currentData, pendingData map[ids.ID]*stakersDat return nil } -func (*state) writePendingStakers(batchOps *[]database.BatchOp, pendingData map[ids.ID]*stakersData) error { +func (*state) writePendingStakers(batchOps *[]database.BatchOp, pendingData map[ids.ID]*stakingTxAndReward) error { for stakerTxID, data := range pendingData { key := merklePendingStakersKey(stakerTxID) diff --git a/vms/platformvm/state/state_helpers.go b/vms/platformvm/state/state_helpers.go index 848a26af780c..ed809863f585 100644 --- a/vms/platformvm/state/state_helpers.go +++ b/vms/platformvm/state/state_helpers.go @@ -17,7 +17,7 @@ type uptimeMetadata struct { lastUpdated time.Time } -type stakersData struct { +type stakingTxAndReward struct { TxBytes []byte `serialize:"true"` // nil means the staker is removed PotentialReward uint64 `serialize:"true"` } From 
2f0e53084472a95782bb3493fcc72064045b5d1f Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 13:31:04 -0500 Subject: [PATCH 128/132] add missing reference --- vms/platformvm/state/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 4b035b0ed117..4e10937ea0fd 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -1045,7 +1045,7 @@ func (s *state) loadCurrentStakers() error { defer iter.Release() for iter.Next() { var data stakingTxAndReward - if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), data); err != nil { + if _, err := txs.GenesisCodec.Unmarshal(iter.Value(), &data); err != nil { return fmt.Errorf("failed to deserialize current stakers data: %w", err) } From 57a27197e47c4f1cc7484e600cbc90ff23732e64 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 12 Dec 2023 13:58:41 -0500 Subject: [PATCH 129/132] naming nits; fix test --- vms/platformvm/block/builder/builder_test.go | 12 ++++++++++-- vms/platformvm/state/state_helpers_test.go | 4 ++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index fcfba8998e2c..0ad5e77a5a6f 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -22,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" @@ -210,16 +211,23 @@ func TestBuildBlockAdvanceTime(t *testing.T) { }() var ( - now = env.backend.Clk.Time() - nextTime = now.Add(2 * txexecutor.SyncBound) + now = env.backend.Clk.Time() + nextTime = now.Add(2 * txexecutor.SyncBound) + addValidatorTx = &txs.Tx{} ) // Add a staker to [env.state] env.state.PutCurrentValidator(&state.Staker{ + TxID: addValidatorTx.ID(), NextTime: nextTime, Priority: txs.PrimaryNetworkValidatorCurrentPriority, }) + // We'll fail in [shutdownEnvironment] if we fail to look up the + // tx that added the staker to [env.state], so add a dummy + // tx to write to disk. 
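+	// (committing the state loads each current staker's tx via GetTx)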
+ env.state.AddTx(addValidatorTx, status.Committed) + // Advance wall clock to [nextTime] env.backend.Clk.Set(nextTime) diff --git a/vms/platformvm/state/state_helpers_test.go b/vms/platformvm/state/state_helpers_test.go index 1905c8f782f9..47bceaf4d853 100644 --- a/vms/platformvm/state/state_helpers_test.go +++ b/vms/platformvm/state/state_helpers_test.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) -func TestSuppliesKeyTest(t *testing.T) { +func TestMerkleSuppliesKey(t *testing.T) { require := require.New(t) subnetID := ids.GenerateTestID() @@ -23,7 +23,7 @@ func TestSuppliesKeyTest(t *testing.T) { require.Equal(subnetID, retrievedSubnetID) } -func TestPermissionedSubnetKey(t *testing.T) { +func TestMerklePermissionedSubnetKey(t *testing.T) { require := require.New(t) subnetID := ids.GenerateTestID() prefix := permissionedSubnetSectionPrefix From cc95bb6a0332b8adf6c4871d4d1d68bc4e59f0b2 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Tue, 19 Dec 2023 21:12:03 +0100 Subject: [PATCH 130/132] reduced diff from dev --- vms/platformvm/block/builder/helpers_test.go | 6 +-- vms/platformvm/block/executor/helpers_test.go | 6 +-- vms/platformvm/state/state.go | 44 +++++++++---------- vms/platformvm/state/state_test.go | 5 ++- vms/platformvm/txs/executor/helpers_test.go | 17 +++---- .../validators/manager_benchmark_test.go | 4 +- vms/platformvm/vm.go | 2 +- vms/platformvm/vm_regression_test.go | 4 +- 8 files changed, 47 insertions(+), 41 deletions(-) diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index ea5ce6e7a3e0..3a63ab1325bf 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -135,7 +135,7 @@ func newEnvironment(t *testing.T) *environment { res.fx = defaultFx(t, res.clk, res.ctx.Log, res.isBootstrapped.Get()) rewardsCalc := reward.NewCalculator(res.config.RewardConfig) - res.state = defaultState(t, res.config.Validators, res.ctx, res.baseDB, rewardsCalc) + res.state = defaultState(t, res.config, res.ctx, res.baseDB, rewardsCalc) res.atomicUTXOs = avax.NewAtomicUTXOManager(res.ctx.SharedMemory, txs.Codec) res.uptimes = uptime.NewManager(res.state, res.clk) @@ -237,7 +237,7 @@ func addSubnet(t *testing.T, env *environment) { func defaultState( t *testing.T, - validators validators.Manager, + cfg *config.Config, ctx *snow.Context, db database.Database, rewards reward.Calculator, @@ -248,7 +248,7 @@ func defaultState( state, err := state.New( db, genesisBytes, - validators, + cfg, ctx, metrics.Noop, rewards, diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index 91f4c31c1a93..a40678632428 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ b/vms/platformvm/block/executor/helpers_test.go @@ -145,7 +145,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { res.atomicUTXOs = avax.NewAtomicUTXOManager(res.ctx.SharedMemory, txs.Codec) if ctrl == nil { - res.state = defaultState(res.config.Validators, res.ctx, res.baseDB, rewardsCalc) + res.state = defaultState(res.config, res.ctx, res.baseDB, rewardsCalc) res.uptimes = uptime.NewManager(res.state, res.clk) res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) res.txBuilder = p_tx_builder.New( @@ -263,7 +263,7 @@ func addSubnet(env *environment) { } func defaultState( - validators validators.Manager, + cfg *config.Config, ctx *snow.Context, db database.Database, rewards reward.Calculator, @@ 
-272,7 +272,7 @@ func defaultState( state, err := state.New( db, genesisBytes, - validators, + cfg, ctx, metrics.Noop, rewards, diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 4e10937ea0fd..0dcadcda77d6 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -35,6 +35,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" @@ -227,10 +228,10 @@ type stateBlk struct { // - BLS Key Diffs // - Reward UTXOs type state struct { - validators validators.Manager - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator + ctx *snow.Context + cfg *config.Config + metrics metrics.Metrics + rewards reward.Calculator baseDB *versiondb.Database singletonDB database.Database @@ -326,7 +327,7 @@ type txAndStatus struct { func New( db database.Database, genesisBytes []byte, - validators validators.Manager, + cfg *config.Config, ctx *snow.Context, metrics metrics.Metrics, rewards reward.Calculator, @@ -334,7 +335,7 @@ func New( s, err := newState( db, metrics, - validators, + cfg, ctx, rewards, ) @@ -354,7 +355,7 @@ func New( func newState( db database.Database, metrics metrics.Metrics, - validators validators.Manager, + cfg *config.Config, ctx *snow.Context, rewards reward.Calculator, ) (*state, error) { @@ -389,11 +390,10 @@ func newState( } return &state{ - validators: validators, - ctx: ctx, - metrics: metrics, - rewards: rewards, - + ctx: ctx, + cfg: cfg, + metrics: metrics, + rewards: rewards, baseDB: baseDB, singletonDB: singletonDB, baseMerkleDB: baseMerkleDB, @@ -1127,21 +1127,21 @@ func (s *state) loadPendingStakers() error { // been called. func (s *state) initValidatorSets() error { for subnetID, validators := range s.currentStakers.validators { - if s.validators.Count(subnetID) != 0 { + if s.cfg.Validators.Count(subnetID) != 0 { // Enforce the invariant that the validator set is empty here. 
return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) } for nodeID, validator := range validators { validatorStaker := validator.validator - if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + if err := s.cfg.Validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { return err } delegatorIterator := NewTreeIterator(validator.delegators) for delegatorIterator.Next() { delegatorStaker := delegatorIterator.Value() - if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { + if err := s.cfg.Validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { delegatorIterator.Release() return err } @@ -1150,8 +1150,8 @@ func (s *state) initValidatorSets() error { } } - s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) - totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) + s.metrics.SetLocalStake(s.cfg.Validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + totalWeight, err := s.cfg.Validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("failed to get total weight of primary network validators: %w", err) } @@ -1886,11 +1886,11 @@ func (s *state) updateValidatorSet( } if weightDiff.Decrease { - err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) + err = s.cfg.Validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) } else { if validatorDiff.validatorStatus == added { staker := validatorDiff.validator - err = s.validators.AddStaker( + err = s.cfg.Validators.AddStaker( subnetID, nodeID, staker.PublicKey, @@ -1898,7 +1898,7 @@ func (s *state) updateValidatorSet( weightDiff.Amount, ) } else { - err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) + err = s.cfg.Validators.AddWeight(subnetID, nodeID, weightDiff.Amount) } } if err != nil { @@ -1906,8 +1906,8 @@ func (s *state) updateValidatorSet( } } - s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) - totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) + s.metrics.SetLocalStake(s.cfg.Validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + totalWeight, err := s.cfg.Validators.TotalWeight(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("failed to get total weight: %w", err) } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index d8e4040f0849..49ef23f42b22 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -27,6 +27,7 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" @@ -164,7 +165,9 @@ func newStateFromDB(require *require.Assertions, db database.Database) State { state, err := newState( db, metrics.Noop, - validators.NewManager(), + &config.Config{ + Validators: validators.NewManager(), + }, &snow.Context{ Log: logging.NoLog{}, }, diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index b37ee777552a..17ceb2b5c452 100644 --- 
a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -130,7 +130,7 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { fx := defaultFx(clk, ctx.Log, isBootstrapped.Get()) rewards := reward.NewCalculator(config.RewardConfig) - baseState := defaultState(config.Validators, ctx, baseDB, rewards) + baseState := defaultState(config, ctx, baseDB, rewards) atomicUTXOs := avax.NewAtomicUTXOManager(ctx.SharedMemory, txs.Codec) uptimes := uptime.NewManager(baseState, clk) @@ -138,7 +138,7 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { txBuilder := builder.New( ctx, - &config, + config, clk, fx, baseState, @@ -147,7 +147,7 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { ) backend := Backend{ - Config: &config, + Config: config, Ctx: ctx, Clk: clk, Bootstrapped: &isBootstrapped, @@ -159,7 +159,7 @@ func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { env := &environment{ isBootstrapped: &isBootstrapped, - config: &config, + config: config, clk: clk, baseDB: baseDB, ctx: ctx, @@ -213,10 +213,11 @@ func addSubnet( stateDiff.AddTx(testSubnet1, status.Committed) require.NoError(stateDiff.Apply(env.state)) + require.NoError(env.state.Commit()) } func defaultState( - validators validators.Manager, + cfg *config.Config, ctx *snow.Context, db database.Database, rewards reward.Calculator, @@ -225,7 +226,7 @@ func defaultState( state, err := state.New( db, genesisBytes, - validators, + cfg, ctx, metrics.Noop, rewards, @@ -275,7 +276,7 @@ func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { return ctx, msm } -func defaultConfig(postBanff, postCortina bool) config.Config { +func defaultConfig(postBanff, postCortina bool) *config.Config { banffTime := mockable.MaxTime if postBanff { banffTime = defaultValidateEndTime.Add(-2 * time.Second) @@ -285,7 +286,7 @@ func defaultConfig(postBanff, postCortina bool) config.Config { cortinaTime = defaultValidateStartTime.Add(-2 * time.Second) } - return config.Config{ + return &config.Config{ Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), Validators: validators.NewManager(), diff --git a/vms/platformvm/validators/manager_benchmark_test.go b/vms/platformvm/validators/manager_benchmark_test.go index 1b549cca00e8..3448105bddbd 100644 --- a/vms/platformvm/validators/manager_benchmark_test.go +++ b/vms/platformvm/validators/manager_benchmark_test.go @@ -108,7 +108,9 @@ func BenchmarkGetValidatorSet(b *testing.B) { s, err := state.New( db, genesisBytes, - vdrs, + &config.Config{ + Validators: vdrs, + }, &snow.Context{ NetworkID: constants.UnitTestID, NodeID: ids.GenerateTestNodeID(), diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index b0f56588ff7e..65fc00f95f08 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -135,7 +135,7 @@ func (vm *VM) Initialize( vm.state, err = state.New( vm.db, genesisBytes, - vm.Config.Validators, + &vm.Config, vm.ctx, vm.metrics, rewards, diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index e2f989bcd5e2..20c2a7518784 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -644,7 +644,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { newState, err := state.New( vm.db, nil, - vm.Config.Validators, + &vm.Config, vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), @@ -948,7 +948,7 @@ func 
TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { newState, err := state.New( vm.db, nil, - vm.Config.Validators, + &vm.Config, vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), From 0e933d96d064e7402ebf12d464e3409702645fa3 Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Wed, 20 Dec 2023 09:55:57 +0100 Subject: [PATCH 131/132] wip: fixing merge --- vms/platformvm/block/builder/builder_test.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index 1066ba2b7922..5623e87a00de 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -263,10 +263,17 @@ func TestBuildBlockForceAdvanceTime(t *testing.T) { ) // Add a staker to [env.state] - env.state.PutCurrentValidator(&state.Staker{ + dummyTx := txs.Tx{ + Unsigned: &txs.AddValidatorTx{}, + } + dummyTx.SetBytes([]byte{0x01}, []byte{0x02}) + staker := &state.Staker{ + TxID: dummyTx.ID(), NextTime: nextTime, Priority: txs.PrimaryNetworkValidatorCurrentPriority, - }) + } + env.state.PutCurrentValidator(staker) + env.state.AddTx(&dummyTx, status.Committed) // Advance wall clock to [nextTime] + [txexecutor.SyncBound] env.backend.Clk.Set(nextTime.Add(txexecutor.SyncBound)) From 04690839fcd9c034a59af7d56ea8857d86654b8e Mon Sep 17 00:00:00 2001 From: Alberto Benegiamo Date: Thu, 4 Jan 2024 14:11:32 +0100 Subject: [PATCH 132/132] added regression test --- vms/platformvm/state/state_regression_test.go | 115 ++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 vms/platformvm/state/state_regression_test.go diff --git a/vms/platformvm/state/state_regression_test.go b/vms/platformvm/state/state_regression_test.go new file mode 100644 index 000000000000..5aa6acde685b --- /dev/null +++ b/vms/platformvm/state/state_regression_test.go @@ -0,0 +1,115 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/chains" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/api" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" +) + +func TestCantStateBeRebuilt(t *testing.T) { + require := require.New(t) + + // elements common to the two state instances + var ( + _, genesisBytes = simpleGenesisWithASingleUTXO(t, snowtest.AVAXAssetID) + stateBaseDB = memdb.New() + + ctx = snowtest.Context(t, snowtest.PChainID) + + cfg = &config.Config{ + Chains: chains.TestManager, + Validators: validators.NewManager(), + UptimeLockedCalculator: uptime.NewLockedCalculator(), + } + + rewards = reward.NewCalculator(cfg.RewardConfig) + registerer = prometheus.NewRegistry() + + dummyMetrics metrics.Metrics + err error + ) + + dummyMetrics, err = metrics.New("", registerer) + require.NoError(err) + + // Instantiate state the first time + firstDB := prefixdb.New([]byte{}, stateBaseDB) + firstState, err := New( + firstDB, + genesisBytes, + cfg, + ctx, + dummyMetrics, + rewards, + ) + require.NoError(err) + require.NotNil(firstState) + + // Instantiate state a second time + secondDB := prefixdb.New([]byte{}, stateBaseDB) + secondState, err := New( + secondDB, + genesisBytes, + cfg, + ctx, + dummyMetrics, + rewards, + ) + require.NoError(err) + require.NotNil(secondState) +} + +func simpleGenesisWithASingleUTXO(t *testing.T, avaxAssetID ids.ID) (*api.BuildGenesisArgs, []byte) { + require := require.New(t) + + id := ids.GenerateTestShortID() + addr, err := address.FormatBech32(constants.UnitTestHRP, id.Bytes()) + require.NoError(err) + genesisUTXOs := []api.UTXO{ + { + Amount: json.Uint64(2000 * units.Avax), + Address: addr, + }, + } + + buildGenesisArgs := api.BuildGenesisArgs{ + Encoding: formatting.Hex, + NetworkID: json.Uint32(constants.UnitTestID), + AvaxAssetID: avaxAssetID, + UTXOs: genesisUTXOs, + Validators: make([]api.GenesisPermissionlessValidator, 0), + Chains: nil, + Time: json.Uint64(12345678), + InitialSupply: json.Uint64(360 * units.MegaAvax), + } + + buildGenesisResponse := api.BuildGenesisReply{} + platformvmSS := api.StaticService{} + require.NoError(platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse)) + + genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) + require.NoError(err) + + return &buildGenesisArgs, genesisBytes +}
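
Editor's note on the one-character fix in PATCH 128 ("add missing reference") above: avalanchego's codec, like most reflection-based Go codecs, can only populate a destination it receives as a pointer. The original call Unmarshal(iter.Value(), data) compiles, because the destination parameter is an empty interface, but the codec cannot write through a copied value, so loading stakers fails at runtime rather than at compile time; Unmarshal(iter.Value(), &data) fixes it. Below is a minimal, self-contained sketch of the same pitfall, using the standard library's encoding/gob as a stand-in for txs.GenesisCodec; the struct mirrors stakingTxAndReward from state_helpers.go, and everything else is illustrative rather than taken from this patch series.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// Mirrors the stakingTxAndReward shape introduced in state_helpers.go.
type stakingTxAndReward struct {
	TxBytes         []byte
	PotentialReward uint64
}

func main() {
	var buf bytes.Buffer
	in := stakingTxAndReward{TxBytes: []byte{0xde, 0xad}, PotentialReward: 42}
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		log.Fatal(err)
	}

	// By value: the decoder has no way to reach the caller's struct,
	// so it rejects the non-pointer destination at runtime.
	var out stakingTxAndReward
	err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(out)
	fmt.Println("by value:", err) // gob: attempt to decode into a non-pointer

	// By pointer: mirrors the fixed call Unmarshal(iter.Value(), &data).
	if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println("by pointer:", out.PotentialReward) // 42
}

The same failure mode explains the patch title: the deserialization loop in loadCurrentStakers would error on every staker until the destination was passed by reference.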