diff --git a/api/rpc/client/client.go b/api/rpc/client/client.go index ff206d723e..56b4a54d19 100644 --- a/api/rpc/client/client.go +++ b/api/rpc/client/client.go @@ -9,6 +9,7 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc/perms" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -26,15 +27,16 @@ var ( ) type Client struct { - Fraud fraud.API - Header header.API - State state.API - Share share.API - DAS das.API - P2P p2p.API - Node node.API - Blob blob.API - DA da.API + Fraud fraud.API + Header header.API + State state.API + Share share.API + DAS das.API + P2P p2p.API + Node node.API + Blob blob.API + DA da.API + Blobstream blobstream.API closer multiClientCloser } @@ -85,14 +87,15 @@ func newClient(ctx context.Context, addr string, authHeader http.Header) (*Clien func moduleMap(client *Client) map[string]interface{} { // TODO: this duplication of strings many times across the codebase can be avoided with issue #1176 return map[string]interface{}{ - "share": &client.Share.Internal, - "state": &client.State.Internal, - "header": &client.Header.Internal, - "fraud": &client.Fraud.Internal, - "das": &client.DAS.Internal, - "p2p": &client.P2P.Internal, - "node": &client.Node.Internal, - "blob": &client.Blob.Internal, - "da": &client.DA.Internal, + "share": &client.Share.Internal, + "state": &client.State.Internal, + "header": &client.Header.Internal, + "fraud": &client.Fraud.Internal, + "das": &client.DAS.Internal, + "p2p": &client.P2P.Internal, + "node": &client.Node.Internal, + "blob": &client.Blob.Internal, + "da": &client.DA.Internal, + "blobstream": &client.Blobstream.Internal, } } diff --git a/api/rpc_test.go b/api/rpc_test.go index 29191b93a2..db65d2a5f8 100644 --- a/api/rpc_test.go +++ b/api/rpc_test.go @@ -22,6 +22,8 
@@ import ( "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/blob" blobMock "github.com/celestiaorg/celestia-node/nodebuilder/blob/mocks" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + blobstreamMock "github.com/celestiaorg/celestia-node/nodebuilder/blobstream/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/da" daMock "github.com/celestiaorg/celestia-node/nodebuilder/da/mocks" "github.com/celestiaorg/celestia-node/nodebuilder/das" @@ -90,15 +92,16 @@ func TestRPCCallsUnderlyingNode(t *testing.T) { // api contains all modules that are made available as the node's // public API surface type api struct { - Fraud fraud.Module - Header header.Module - State statemod.Module - Share share.Module - DAS das.Module - Node node.Module - P2P p2p.Module - Blob blob.Module - DA da.Module + Fraud fraud.Module + Header header.Module + State statemod.Module + Share share.Module + DAS das.Module + Node node.Module + P2P p2p.Module + Blob blob.Module + DA da.Module + Blobstream blobstream.Module } func TestModulesImplementFullAPI(t *testing.T) { @@ -312,6 +315,7 @@ func setupNodeWithAuthedRPC(t *testing.T, nodeMock.NewMockModule(ctrl), blobMock.NewMockModule(ctrl), daMock.NewMockModule(ctrl), + blobstreamMock.NewMockModule(ctrl), } // given the behavior of fx.Invoke, this invoke will be called last as it is added at the root @@ -342,13 +346,14 @@ func setupNodeWithAuthedRPC(t *testing.T, } type mockAPI struct { - State *stateMock.MockModule - Share *shareMock.MockModule - Fraud *fraudMock.MockModule - Header *headerMock.MockModule - Das *dasMock.MockModule - P2P *p2pMock.MockModule - Node *nodeMock.MockModule - Blob *blobMock.MockModule - DA *daMock.MockModule + State *stateMock.MockModule + Share *shareMock.MockModule + Fraud *fraudMock.MockModule + Header *headerMock.MockModule + Das *dasMock.MockModule + P2P *p2pMock.MockModule + Node *nodeMock.MockModule + Blob *blobMock.MockModule + DA 
*daMock.MockModule + Blobstream *blobstreamMock.MockModule } diff --git a/blob/blob.go b/blob/blob.go index 89177b713e..610b0cba96 100644 --- a/blob/blob.go +++ b/blob/blob.go @@ -17,20 +17,6 @@ import ( var errEmptyShares = errors.New("empty shares") -// Commitment is a Merkle Root of the subtree built from shares of the Blob. -// It is computed by splitting the blob into shares and building the Merkle subtree to be included -// after Submit. -type Commitment []byte - -func (com Commitment) String() string { - return string(com) -} - -// Equal ensures that commitments are the same -func (com Commitment) Equal(c Commitment) bool { - return bytes.Equal(com, c) -} - // The Proof is a set of nmt proofs that can be verified only through // the included method (due to limitation of the nmt https://github.com/celestiaorg/nmt/issues/218). // Proof proves the WHOLE namespaced data to the row roots. diff --git a/blob/commitment_proof.go b/blob/commitment_proof.go new file mode 100644 index 0000000000..5cf419a659 --- /dev/null +++ b/blob/commitment_proof.go @@ -0,0 +1,134 @@ +package blob + +import ( + "bytes" + "fmt" + + coretypes "github.com/tendermint/tendermint/types" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/nmt/namespace" + + "github.com/celestiaorg/celestia-node/share" +) + +// Commitment is a Merkle Root of the subtree built from shares of the Blob. +// It is computed by splitting the blob into shares and building the Merkle subtree to be included +// after Submit. +type Commitment []byte + +// CommitmentProof is an inclusion proof of a commitment to the data root. +type CommitmentProof struct { + // SubtreeRoots are the subtree roots of the blob's data that are + // used to create the commitment. + SubtreeRoots [][]byte `json:"subtree_roots"` + // SubtreeRootProofs are the NMT proofs for the subtree roots + // to the row roots. 
+ SubtreeRootProofs []*nmt.Proof `json:"subtree_root_proofs"` + // NamespaceID is the namespace id of the commitment being proven. This + // namespace id is used when verifying the proof. If the namespace id doesn't + // match the namespace of the shares, the proof will fail verification. + NamespaceID namespace.ID `json:"namespace_id"` + // RowProof is the proof of the rows containing the blob's data to the + // data root. + RowProof coretypes.RowProof `json:"row_proof"` + NamespaceVersion uint8 `json:"namespace_version"` +} + +func (com Commitment) String() string { + return string(com) +} + +// Equal ensures that commitments are the same +func (com Commitment) Equal(c Commitment) bool { + return bytes.Equal(com, c) +} + +// Validate performs basic validation to the commitment proof. +// Note: it doesn't verify if the proof is valid or not. +// Check Verify() for that. +func (commitmentProof *CommitmentProof) Validate() error { + if len(commitmentProof.SubtreeRoots) < len(commitmentProof.SubtreeRootProofs) { + return fmt.Errorf( + "the number of subtree roots %d should be bigger than the number of subtree root proofs %d", + len(commitmentProof.SubtreeRoots), + len(commitmentProof.SubtreeRootProofs), + ) + } + if len(commitmentProof.SubtreeRootProofs) != len(commitmentProof.RowProof.Proofs) { + return fmt.Errorf( + "the number of subtree root proofs %d should be equal to the number of row root proofs %d", + len(commitmentProof.SubtreeRootProofs), + len(commitmentProof.RowProof.Proofs), + ) + } + if int(commitmentProof.RowProof.EndRow-commitmentProof.RowProof.StartRow+1) != len(commitmentProof.RowProof.RowRoots) { + return fmt.Errorf( + "the number of rows %d must equal the number of row roots %d", + int(commitmentProof.RowProof.EndRow-commitmentProof.RowProof.StartRow+1), + len(commitmentProof.RowProof.RowRoots), + ) + } + if len(commitmentProof.RowProof.Proofs) != len(commitmentProof.RowProof.RowRoots) { + return fmt.Errorf( + "the number of proofs %d must equal 
the number of row roots %d", + len(commitmentProof.RowProof.Proofs), + len(commitmentProof.RowProof.RowRoots), + ) + } + return nil +} + +// Verify verifies that a commitment proof is valid, i.e., the subtree roots commit +// to some data that was posted to a square. +// Expects the commitment proof to be properly formulated and validated +// using the Validate() function. +func (commitmentProof *CommitmentProof) Verify(root []byte, subtreeRootThreshold int) (bool, error) { + nmtHasher := nmt.NewNmtHasher(appconsts.NewBaseHashFunc(), share.NamespaceSize, true) + + // computes the total number of shares proven. + numberOfShares := 0 + for _, proof := range commitmentProof.SubtreeRootProofs { + numberOfShares += proof.End() - proof.Start() + } + + // use the computed total number of shares to calculate the subtree roots + // width. + // the subtree roots width is defined in ADR-013: + // + //https://github.com/celestiaorg/celestia-app/blob/main/docs/architecture/adr-013-non-interactive-default-rules-for-zero-padding.md + subtreeRootsWidth := shares.SubTreeWidth(numberOfShares, subtreeRootThreshold) + + // verify the proof of the subtree roots + subtreeRootsCursor := 0 + for i, subtreeRootProof := range commitmentProof.SubtreeRootProofs { + // calculate the share range that each subtree root commits to. 
+ ranges, err := nmt.ToLeafRanges(subtreeRootProof.Start(), subtreeRootProof.End(), subtreeRootsWidth) + if err != nil { + return false, err + } + valid, err := subtreeRootProof.VerifySubtreeRootInclusion( + nmtHasher, + commitmentProof.SubtreeRoots[subtreeRootsCursor:subtreeRootsCursor+len(ranges)], + subtreeRootsWidth, + commitmentProof.RowProof.RowRoots[i], + ) + if err != nil { + return false, err + } + if !valid { + return false, + fmt.Errorf( + "subtree root proof for range [%d, %d) is invalid", + subtreeRootProof.Start(), + subtreeRootProof.End(), + ) + } + subtreeRootsCursor += len(ranges) + } + + // verify row roots to data root proof + return commitmentProof.RowProof.VerifyProof(root), nil +} diff --git a/blob/service.go b/blob/service.go index 3a35c140f2..70714d53b6 100644 --- a/blob/service.go +++ b/blob/service.go @@ -1,7 +1,9 @@ package blob import ( + bytes2 "bytes" "context" + "encoding/hex" "errors" "fmt" "slices" @@ -14,7 +16,12 @@ import ( "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/utils" @@ -387,3 +394,194 @@ func (s *Service) getBlobs( _, _, err = s.retrieve(ctx, header.Height(), namespace, sharesParser) return blobs, err } + +func (s *Service) GetCommitmentProof( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, +) (*CommitmentProof, error) { + log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) + if height == 0 { + return nil, fmt.Errorf("height cannot be equal to 0") + } + + // get the blob to compute the subtree roots + log.Debugw( + "getting the 
blob", + "height", + height, + "commitment", + shareCommitment, + "namespace", + namespace, + ) + blb, err := s.Get(ctx, height, namespace, shareCommitment) + if err != nil { + return nil, err + } + + log.Debugw( + "converting the blob to shares", + "height", + height, + "commitment", + shareCommitment, + "namespace", + namespace, + ) + blobShares, err := BlobsToShares(blb) + if err != nil { + return nil, err + } + if len(blobShares) == 0 { + return nil, fmt.Errorf("the blob shares for commitment %s are empty", hex.EncodeToString(shareCommitment)) + } + + // get the extended header + log.Debugw( + "getting the extended header", + "height", + height, + ) + extendedHeader, err := s.headerGetter(ctx, height) + if err != nil { + return nil, err + } + + log.Debugw("getting eds", "height", height) + eds, err := s.shareGetter.GetEDS(ctx, extendedHeader) + if err != nil { + return nil, err + } + + return ProveCommitment(eds, namespace, blobShares) +} + +func ProveCommitment( + eds *rsmt2d.ExtendedDataSquare, + namespace share.Namespace, + blobShares []share.Share, +) (*CommitmentProof, error) { + // find the blob shares in the EDS + blobSharesStartIndex := -1 + for index, share := range eds.FlattenedODS() { + if bytes2.Equal(share, blobShares[0]) { + blobSharesStartIndex = index + } + } + if blobSharesStartIndex < 0 { + return nil, fmt.Errorf("couldn't find the blob shares in the ODS") + } + + nID, err := appns.From(namespace) + if err != nil { + return nil, err + } + + log.Debugw( + "generating the blob share proof for commitment", + "start_share", + blobSharesStartIndex, + "end_share", + blobSharesStartIndex+len(blobShares), + ) + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + nID, + shares.NewRange(blobSharesStartIndex, blobSharesStartIndex+len(blobShares)), + ) + if err != nil { + return nil, err + } + + // convert the shares to row root proofs to nmt proofs + nmtProofs := make([]*nmt.Proof, 0) + for _, proof := range sharesProof.ShareProofs { + 
nmtProof := nmt.NewInclusionProof(int(proof.Start), + int(proof.End), + proof.Nodes, + true) + nmtProofs = append( + nmtProofs, + &nmtProof, + ) + } + + // compute the subtree roots of the blob shares + log.Debugw("computing the subtree roots") + subtreeRoots := make([][]byte, 0) + dataCursor := 0 + for _, proof := range nmtProofs { + // TODO: do we want directly use the default subtree root threshold + // or want to allow specifying which version to use? + ranges, err := nmt.ToLeafRanges( + proof.Start(), + proof.End(), + shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold), + ) + if err != nil { + return nil, err + } + roots, err := computeSubtreeRoots( + blobShares[dataCursor:dataCursor+proof.End()-proof.Start()], + ranges, + proof.Start(), + ) + if err != nil { + return nil, err + } + subtreeRoots = append(subtreeRoots, roots...) + dataCursor += proof.End() - proof.Start() + } + + log.Debugw("successfully proved the share commitment") + commitmentProof := CommitmentProof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: nmtProofs, + NamespaceID: namespace.ID(), + RowProof: sharesProof.RowProof, + NamespaceVersion: namespace.Version(), + } + return &commitmentProof, nil +} + +// computeSubtreeRoots takes a set of shares and ranges and returns the corresponding subtree roots. +// the offset is the number of shares that are before the subtree roots we're calculating. 
+func computeSubtreeRoots(shares []share.Share, ranges []nmt.LeafRange, offset int) ([][]byte, error) { + if len(shares) == 0 { + return nil, fmt.Errorf("cannot compute subtree roots for an empty shares list") + } + if len(ranges) == 0 { + return nil, fmt.Errorf("cannot compute subtree roots for an empty ranges list") + } + if offset < 0 { + return nil, fmt.Errorf("the offset %d cannot be stricly negative", offset) + } + + // create a tree containing the shares to generate their subtree roots + tree := nmt.New( + appconsts.NewBaseHashFunc(), + nmt.IgnoreMaxNamespace(true), + nmt.NamespaceIDSize(share.NamespaceSize), + ) + for _, sh := range shares { + leafData := make([]byte, 0) + leafData = append(append(leafData, share.GetNamespace(sh)...), sh...) + err := tree.Push(leafData) + if err != nil { + return nil, err + } + } + + // generate the subtree roots + subtreeRoots := make([][]byte, 0) + for _, rg := range ranges { + root, err := tree.ComputeSubtreeRoot(rg.Start-offset, rg.End-offset) + if err != nil { + return nil, err + } + subtreeRoots = append(subtreeRoots, root) + } + return subtreeRoots, nil +} diff --git a/blob/service_test.go b/blob/service_test.go index fc48f96eb9..efbc6a65bf 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -15,18 +15,24 @@ import ( ds_sync "github.com/ipfs/go-datastore/sync" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/celestiaorg/celestia-app/pkg/appconsts" appns "github.com/celestiaorg/celestia-app/pkg/namespace" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" "github.com/celestiaorg/celestia-app/pkg/shares" + blobtypes "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/go-header/store" + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/blob/blobtest" 
// TestProveCommitmentAllCombinations tests proving all the commitments in a block.
// The number of shares per blob increases with each blob to cover proving a large number
// of possibilities.
func TestProveCommitmentAllCombinations(t *testing.T) {
	tests := map[string]struct {
		blobSize int
	}{
		"very small blobs that take less than a share": {blobSize: 350},
		"small blobs that take 2 shares":               {blobSize: 1000},
		"small blobs that take ~10 shares":             {blobSize: 5000},
		"large blobs ~100 shares":                      {blobSize: 50000},
		"large blobs ~150 shares":                      {blobSize: 75000},
		"large blobs ~300 shares":                      {blobSize: 150000},
		"very large blobs ~1500 shares":                {blobSize: 750000},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			proveAndVerifyShareCommitments(t, tc.blobSize)
		})
	}
}

// proveAndVerifyShareCommitments generates a test block whose blobs have the
// given size, then, for every blob in it, proves its share commitment via
// ProveCommitment and checks the result against a proof built the traditional
// way from the raw share range.
func proveAndVerifyShareCommitments(t *testing.T, blobSize int) {
	msgs, blobs, nss, eds, _, _, dataRoot := edstest.GenerateTestBlock(t, blobSize, 10)
	for msgIndex, msg := range msgs {
		t.Run(fmt.Sprintf("msgIndex=%d", msgIndex), func(t *testing.T) {
			blb, err := NewBlob(uint8(blobs[msgIndex].ShareVersion), nss[msgIndex].Bytes(), blobs[msgIndex].Data)
			require.NoError(t, err)
			blobShares, err := BlobsToShares(blb)
			require.NoError(t, err)
			// compute the commitment
			actualCommitmentProof, err := ProveCommitment(eds, nss[msgIndex].Bytes(), blobShares)
			require.NoError(t, err)

			// make sure the actual commitment attests to the data
			require.NoError(t, actualCommitmentProof.Validate())
			valid, err := actualCommitmentProof.Verify(
				dataRoot,
				appconsts.DefaultSubtreeRootThreshold,
			)
			require.NoError(t, err)
			require.True(t, valid)

			// generate an expected proof and verify it's valid
			expectedCommitmentProof := generateCommitmentProofFromBlock(t, eds, nss[msgIndex].Bytes(), blobs[msgIndex], dataRoot)
			require.NoError(t, expectedCommitmentProof.Validate())
			valid, err = expectedCommitmentProof.Verify(
				dataRoot,
				appconsts.DefaultSubtreeRootThreshold,
			)
			require.NoError(t, err)
			require.True(t, valid)

			// make sure the expected proof is the same as the actual one
			assert.Equal(t, expectedCommitmentProof, *actualCommitmentProof)

			// make sure the expected commitment commits to the subtree roots in the result proof
			actualCommitment, _ := merkle.ProofsFromByteSlices(actualCommitmentProof.SubtreeRoots)
			assert.Equal(t, msg.ShareCommitments[0], actualCommitment)
		})
	}
}
+func generateCommitmentProofFromBlock( + t *testing.T, + eds *rsmt2d.ExtendedDataSquare, + ns share.Namespace, + blob *blobtypes.Blob, + dataRoot []byte, +) CommitmentProof { + // create the blob from the data + blb, err := NewBlob( + uint8(blob.ShareVersion), + ns, + blob.Data, + ) + require.NoError(t, err) + + // convert the blob to a number of shares + blobShares, err := BlobsToShares(blb) + require.NoError(t, err) + + // find the first share of the blob in the ODS + startShareIndex := -1 + for i, sh := range eds.FlattenedODS() { + if bytes.Equal(sh, blobShares[0]) { + startShareIndex = i + break + } + } + require.Greater(t, startShareIndex, 0) + + // create an inclusion proof of the blob using the share range instead of the commitment + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + ns.ToAppNamespace(), + shares.NewRange(startShareIndex, startShareIndex+len(blobShares)), + ) + require.NoError(t, err) + require.NoError(t, sharesProof.Validate(dataRoot)) + + // calculate the subtree roots + subtreeRoots := make([][]byte, 0) + dataCursor := 0 + for _, proof := range sharesProof.ShareProofs { + ranges, err := nmt.ToLeafRanges( + int(proof.Start), + int(proof.End), + shares.SubTreeWidth(len(blobShares), appconsts.DefaultSubtreeRootThreshold), + ) + require.NoError(t, err) + roots, err := computeSubtreeRoots( + blobShares[dataCursor:int32(dataCursor)+proof.End-proof.Start], + ranges, + int(proof.Start), + ) + require.NoError(t, err) + subtreeRoots = append(subtreeRoots, roots...) 
+ dataCursor += int(proof.End - proof.Start) + } + + // convert the nmt proof to be accepted by the commitment proof + nmtProofs := make([]*nmt.Proof, 0) + for _, proof := range sharesProof.ShareProofs { + nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), proof.Nodes, true) + nmtProofs = append(nmtProofs, &nmtProof) + } + + commitmentProof := CommitmentProof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: nmtProofs, + NamespaceID: sharesProof.NamespaceID, + RowProof: sharesProof.RowProof, + NamespaceVersion: uint8(sharesProof.NamespaceVersion), + } + + return commitmentProof +} diff --git a/go.mod b/go.mod index 74f46f470d..2143209f86 100644 --- a/go.mod +++ b/go.mod @@ -8,11 +8,11 @@ require ( github.com/BurntSushi/toml v1.4.0 github.com/alecthomas/jsonschema v0.0.0-20220216202328-9eeeec9d044b github.com/benbjohnson/clock v1.3.5 - github.com/celestiaorg/celestia-app v1.12.0 + github.com/celestiaorg/celestia-app v1.13.0 github.com/celestiaorg/go-fraud v0.2.1 github.com/celestiaorg/go-header v0.6.2 github.com/celestiaorg/go-libp2p-messenger v0.2.0 - github.com/celestiaorg/nmt v0.21.0 + github.com/celestiaorg/nmt v0.22.0 github.com/celestiaorg/rsmt2d v0.13.1 github.com/cosmos/cosmos-sdk v0.46.16 github.com/cristalhq/jwt/v5 v5.4.0 @@ -356,5 +356,5 @@ replace ( github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 // broken goleveldb needs to be replaced for the cosmos-sdk and celestia-app github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.37.0-tm-v0.34.29 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.38.0-tm-v0.34.29 ) diff --git a/go.sum b/go.sum index a97fb6619b..154d7dc132 100644 --- a/go.sum +++ b/go.sum @@ -353,10 +353,10 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/bwesterb/go-ristretto v1.2.0/go.mod 
h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/celestiaorg/celestia-app v1.12.0 h1:7SMTI/sB8jxp7QPJQRi/liAREnToAD5nOyA7M+naPIc= -github.com/celestiaorg/celestia-app v1.12.0/go.mod h1:O/idsViCLLFdcaE4cJ+iZctZLX0KWfRheKT2W18W2uM= -github.com/celestiaorg/celestia-core v1.37.0-tm-v0.34.29 h1:9nJDE37cTg/Cx+f4FS2g7yYeoLrsaNJg36XsQ47sS1A= -github.com/celestiaorg/celestia-core v1.37.0-tm-v0.34.29/go.mod h1:IIdMu9gnDtjUmZkFuBN4Bf11z/rBtlL2rtwbQxdbRAU= +github.com/celestiaorg/celestia-app v1.13.0 h1:7MWEox6lim6WDyiP84Y2/ERfWUJxWPfZlKxzO6OFcig= +github.com/celestiaorg/celestia-app v1.13.0/go.mod h1:CF9VZwWAlTU0Is/BOsmxqkbkYnnmrgl0YRlSBIzr0m0= +github.com/celestiaorg/celestia-core v1.38.0-tm-v0.34.29 h1:HwbA4OegRvXX0aNchBA7Cmu+oIxnH7xRcOhISuDP0ak= +github.com/celestiaorg/celestia-core v1.38.0-tm-v0.34.29/go.mod h1:MyElURdWAOJkOp84WZnfEUJ+OLvTwOOHG2lbK9E8XRI= github.com/celestiaorg/cosmos-sdk v1.23.0-sdk-v0.46.16 h1:N2uETI13szEKnGAdKhtTR0EsrpcW0AwRKYER74WLnuw= github.com/celestiaorg/cosmos-sdk v1.23.0-sdk-v0.46.16/go.mod h1:Bpl1LSWiDpQumgOhhMTZBMopqa0j7fRasIhvTZB44P0= github.com/celestiaorg/dagstore v0.0.0-20230824094345-537c012aa403 h1:Lj73O3S+KJx5/hgZ+IeOLEIoLsAveJN/7/ZtQQtPSVw= @@ -369,8 +369,8 @@ github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26 h1:P2RI1xJ49EZ8cuHMcH+ZSBonfRDtBS8OS9Jdt1BWX3k= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26/go.mod h1:2m8ukndOegwB0PU0AfJCwDUQHqd7QQRlSXvQL5VToVY= -github.com/celestiaorg/nmt v0.21.0 h1:81MBqxNn3orByoiCtdNVjwi5WsLgMkzHwP02ZMhTBHM= -github.com/celestiaorg/nmt v0.21.0/go.mod 
h1:ia/EpCk0enD5yO5frcxoNoFToz2Ghtk2i+blmCRjIY8= +github.com/celestiaorg/nmt v0.22.0 h1:AGtfmBiVgreR1KkIV5R7XFNeMp/H4IUDLlBbLjZZ3zk= +github.com/celestiaorg/nmt v0.22.0/go.mod h1:ia/EpCk0enD5yO5frcxoNoFToz2Ghtk2i+blmCRjIY8= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2 h1:Q8nr5SAtDW5gocrBwqwDJcSS/JedqU58WwQA2SP+nXw= github.com/celestiaorg/quantum-gravity-bridge/v2 v2.1.2/go.mod h1:s/LzLUw0WeYPJ6qdk4q46jKLOq7rc9Z5Mdrxtfpcigw= github.com/celestiaorg/rsmt2d v0.13.1 h1:eRhp79DKTkDojwInKVs1lRK6f6zJc1BVlmZfUfI19yQ= diff --git a/nodebuilder/blob/blob.go b/nodebuilder/blob/blob.go index c4c0352516..dc5c099f47 100644 --- a/nodebuilder/blob/blob.go +++ b/nodebuilder/blob/blob.go @@ -34,15 +34,52 @@ type Module interface { // Included checks whether a blob's given commitment(Merkle subtree root) is included at // given height and under the namespace. Included(_ context.Context, height uint64, _ share.Namespace, _ *blob.Proof, _ blob.Commitment) (bool, error) + // GetCommitmentProof generates a commitment proof for a share commitment. 
+ GetCommitmentProof( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, + ) (*blob.CommitmentProof, error) } type API struct { Internal struct { - Submit func(context.Context, []*blob.Blob, *blob.SubmitOptions) (uint64, error) `perm:"write"` - Get func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Blob, error) `perm:"read"` - GetAll func(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) `perm:"read"` - GetProof func(context.Context, uint64, share.Namespace, blob.Commitment) (*blob.Proof, error) `perm:"read"` - Included func(context.Context, uint64, share.Namespace, *blob.Proof, blob.Commitment) (bool, error) `perm:"read"` + Submit func( + context.Context, + []*blob.Blob, + *blob.SubmitOptions, + ) (uint64, error) `perm:"write"` + Get func( + context.Context, + uint64, + share.Namespace, + blob.Commitment, + ) (*blob.Blob, error) `perm:"read"` + GetAll func( + context.Context, + uint64, + []share.Namespace, + ) ([]*blob.Blob, error) `perm:"read"` + GetProof func( + context.Context, + uint64, + share.Namespace, + blob.Commitment, + ) (*blob.Proof, error) `perm:"read"` + Included func( + context.Context, + uint64, + share.Namespace, + *blob.Proof, + blob.Commitment, + ) (bool, error) `perm:"read"` + GetCommitmentProof func( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, + ) (*blob.CommitmentProof, error) `perm:"read"` } } @@ -72,6 +109,15 @@ func (api *API) GetProof( return api.Internal.GetProof(ctx, height, namespace, commitment) } +func (api *API) GetCommitmentProof( + ctx context.Context, + height uint64, + namespace share.Namespace, + shareCommitment []byte, +) (*blob.CommitmentProof, error) { + return api.Internal.GetCommitmentProof(ctx, height, namespace, shareCommitment) +} + func (api *API) Included( ctx context.Context, height uint64, diff --git a/nodebuilder/blob/mocks/api.go b/nodebuilder/blob/mocks/api.go index 
8b46c42d6c..39815179bd 100644 --- a/nodebuilder/blob/mocks/api.go +++ b/nodebuilder/blob/mocks/api.go @@ -67,6 +67,21 @@ func (mr *MockModuleMockRecorder) GetAll(arg0, arg1, arg2 interface{}) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockModule)(nil).GetAll), arg0, arg1, arg2) } +// GetCommitmentProof mocks base method. +func (m *MockModule) GetCommitmentProof(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 []byte) (*blob.CommitmentProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCommitmentProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blob.CommitmentProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCommitmentProof indicates an expected call of GetCommitmentProof. +func (mr *MockModuleMockRecorder) GetCommitmentProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommitmentProof", reflect.TypeOf((*MockModule)(nil).GetCommitmentProof), arg0, arg1, arg2, arg3) +} + // GetProof mocks base method. func (m *MockModule) GetProof(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 blob.Commitment) (*blob.Proof, error) { m.ctrl.T.Helper() diff --git a/nodebuilder/blobstream/blobstream.go b/nodebuilder/blobstream/blobstream.go new file mode 100644 index 0000000000..57a8ab26cf --- /dev/null +++ b/nodebuilder/blobstream/blobstream.go @@ -0,0 +1,48 @@ +package blobstream + +import ( + "context" +) + +var _ Module = (*API)(nil) + +// Module defines the API related to interacting with the data root tuples proofs +// +//go:generate mockgen -destination=mocks/api.go -package=mocks . Module +type Module interface { + // GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, + // and then creates a new Merkle root of those data roots. The range is end exclusive. 
+ // It's in the header module because it only needs access to the headers to generate the proof. + GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) + + // GetDataRootTupleInclusionProof creates an inclusion proof, for the data root tuple of block + // height `height`, in the set of blocks defined by `start` and `end`. The range + // is end exclusive. + // It's in the header module because it only needs access to the headers to generate the proof. + GetDataRootTupleInclusionProof( + ctx context.Context, + height, start, end uint64, + ) (*DataRootTupleInclusionProof, error) +} + +// API is a wrapper around the Module for RPC. +type API struct { + Internal struct { + GetDataRootTupleRoot func(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) `perm:"read"` + GetDataRootTupleInclusionProof func( + ctx context.Context, + height, start, end uint64, + ) (*DataRootTupleInclusionProof, error) `perm:"read"` + } +} + +func (api *API) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) { + return api.Internal.GetDataRootTupleRoot(ctx, start, end) +} + +func (api *API) GetDataRootTupleInclusionProof( + ctx context.Context, + height, start, end uint64, +) (*DataRootTupleInclusionProof, error) { + return api.Internal.GetDataRootTupleInclusionProof(ctx, height, start, end) +} diff --git a/nodebuilder/blobstream/data_root_tuple_root.go b/nodebuilder/blobstream/data_root_tuple_root.go new file mode 100644 index 0000000000..de2d60c4fa --- /dev/null +++ b/nodebuilder/blobstream/data_root_tuple_root.go @@ -0,0 +1,207 @@ +package blobstream + +import ( + "context" + "encoding/hex" + "fmt" + "strconv" + + nodeheader "github.com/celestiaorg/celestia-node/header" + + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/libs/bytes" + + "github.com/celestiaorg/celestia-node/nodebuilder/header" +) + +// DataRootTupleRoot is the root of the merkle tree created +// from 
a set of data root tuples. +type DataRootTupleRoot bytes.HexBytes + +// DataRootTupleInclusionProof is the binary merkle +// inclusion proof of a height to a data commitment. +type DataRootTupleInclusionProof *merkle.Proof + +// padBytes Pad bytes to given length +func padBytes(byt []byte, length int) ([]byte, error) { + l := len(byt) + if l > length { + return nil, fmt.Errorf( + "cannot pad bytes because length of bytes array: %d is greater than given length: %d", + l, + length, + ) + } + if l == length { + return byt, nil + } + tmp := make([]byte, length) + copy(tmp[length-l:], byt) + return tmp, nil +} + +// to32PaddedHexBytes takes a number and returns its hex representation padded to 32 bytes. +// Used to mimic the result of `abi.encode(number)` in Ethereum. +func to32PaddedHexBytes(number uint64) ([]byte, error) { + hexRepresentation := strconv.FormatUint(number, 16) + // Make sure hex representation has even length. + // The `strconv.FormatUint` can return odd length hex encodings. + // For example, `strconv.FormatUint(10, 16)` returns `a`. + // Thus, we need to pad it. + if len(hexRepresentation)%2 == 1 { + hexRepresentation = "0" + hexRepresentation + } + hexBytes, hexErr := hex.DecodeString(hexRepresentation) + if hexErr != nil { + return nil, hexErr + } + paddedBytes, padErr := padBytes(hexBytes, 32) + if padErr != nil { + return nil, padErr + } + return paddedBytes, nil +} + +// encodeDataRootTuple takes a height and a data root, and returns the equivalent of +// `abi.encode(...)` in Ethereum. 
+// The encoded type is a dataRootTuple, which has the following ABI: +// +// { +// "components":[ +// { +// "internalType":"uint256", +// "name":"height", +// "type":"uint256" +// }, +// { +// "internalType":"bytes32", +// "name":"dataRoot", +// "type":"bytes32" +// }, +// { +// "internalType":"structDataRootTuple", +// "name":"_tuple", +// "type":"tuple" +// } +// ] +// } +// +// padding the hex representation of the height padded to 32 bytes concatenated to the data root. +// For more information, refer to: +// https://github.com/celestiaorg/blobstream-contracts/blob/master/src/DataRootTuple.sol +func encodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { + paddedHeight, err := to32PaddedHexBytes(height) + if err != nil { + return nil, err + } + return append(paddedHeight, dataRoot[:]...), nil +} + +// dataRootTupleRootBlocksLimit The maximum number of blocks to be used to create a data commitment. +// It's a local parameter to protect the API from creating unnecessarily large commitments. +const dataRootTupleRootBlocksLimit = 10_000 // ~27 hours of blocks assuming 10-second blocks. + +// validateDataRootTupleRootRange runs basic checks on the ascending sorted list of +// heights that will be used subsequently in generating data commitments over +// the defined set of heights by ensuring the range exists in the chain. 
+func (s *Service) validateDataRootTupleRootRange(ctx context.Context, start, end uint64) error { + if start == 0 { + return header.ErrHeightZero + } + if start >= end { + return fmt.Errorf("end block is smaller or equal to the start block") + } + + heightsRange := end - start + if heightsRange > uint64(dataRootTupleRootBlocksLimit) { + return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataRootTupleRootBlocksLimit) + } + + currentLocalHeader, err := s.headerServ.LocalHead(ctx) + if err != nil { + return fmt.Errorf("could not get the local head to validate the data root tuple root range: %w", err) + } + // the data commitment range is end exclusive + if end > currentLocalHeader.Height()+1 { + return fmt.Errorf( + "end block %d is higher than local chain height %d. Wait for the node until it syncs up to %d", + end, + currentLocalHeader.Height(), + end, + ) + } + return nil +} + +// hashDataRootTuples hashes a list of encoded blocks data root tuples, i.e., height, data root and +// square size, then returns their merkle root. +func hashDataRootTuples(encodedDataRootTuples [][]byte) ([]byte, error) { + if len(encodedDataRootTuples) == 0 { + return nil, fmt.Errorf("cannot hash an empty list of encoded data root tuples") + } + root := merkle.HashFromByteSlices(encodedDataRootTuples) + return root, nil +} + +// validateDataRootInclusionProofRequest validates the request to generate a data root +// inclusion proof. +func (s *Service) validateDataRootInclusionProofRequest( + ctx context.Context, + height, start, end uint64, +) error { + err := s.validateDataRootTupleRootRange(ctx, start, end) + if err != nil { + return err + } + if height < start || height >= end { + return fmt.Errorf( + "height %d should be in the end exclusive interval first_block %d last_block %d", + height, + start, + end, + ) + } + return nil +} + +// proveDataRootTuples returns the merkle inclusion proof for a height. 
+// expects the list of encoded data root tuples to be ordered and the heights to be consecutive. +func proveDataRootTuples(encodedDataRootTuples [][]byte, rangeStartHeight, height uint64) (*merkle.Proof, error) { + if len(encodedDataRootTuples) == 0 { + return nil, fmt.Errorf("cannot prove an empty list of encoded data root tuples") + } + if height == 0 || rangeStartHeight == 0 { + return nil, header.ErrHeightZero + } + _, proofs := merkle.ProofsFromByteSlices(encodedDataRootTuples) + return proofs[height-rangeStartHeight], nil +} + +// fetchEncodedDataRootTuples takes an end exclusive range of heights and fetches its +// corresponding data root tuples. +// end is not included in the range. +func (s *Service) fetchEncodedDataRootTuples(ctx context.Context, start, end uint64) ([][]byte, error) { + encodedDataRootTuples := make([][]byte, 0, end-start) + headers := make([]*nodeheader.ExtendedHeader, 0, end-start) + + startHeader, err := s.headerServ.GetByHeight(ctx, start) + if err != nil { + return nil, err + } + headers = append(headers, startHeader) + + headerRange, err := s.headerServ.GetRangeByHeight(ctx, startHeader, end) + if err != nil { + return nil, err + } + headers = append(headers, headerRange...) + + for _, header := range headers { + encodedDataRootTuple, err := encodeDataRootTuple(header.Height(), *(*[32]byte)(header.DataHash)) + if err != nil { + return nil, err + } + encodedDataRootTuples = append(encodedDataRootTuples, encodedDataRootTuple) + } + return encodedDataRootTuples, nil +} diff --git a/nodebuilder/blobstream/mocks/api.go b/nodebuilder/blobstream/mocks/api.go new file mode 100644 index 0000000000..e4dff86a78 --- /dev/null +++ b/nodebuilder/blobstream/mocks/api.go @@ -0,0 +1,66 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/celestiaorg/celestia-node/nodebuilder/blobstream (interfaces: Module) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + blobstream "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" + gomock "github.com/golang/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// GetDataRootTupleInclusionProof mocks base method. +func (m *MockModule) GetDataRootTupleInclusionProof(arg0 context.Context, arg1, arg2, arg3 uint64) (*blobstream.DataRootTupleInclusionProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDataRootTupleInclusionProof", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*blobstream.DataRootTupleInclusionProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDataRootTupleInclusionProof indicates an expected call of GetDataRootTupleInclusionProof. +func (mr *MockModuleMockRecorder) GetDataRootTupleInclusionProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootTupleInclusionProof", reflect.TypeOf((*MockModule)(nil).GetDataRootTupleInclusionProof), arg0, arg1, arg2, arg3) +} + +// GetDataRootTupleRoot mocks base method. 
+func (m *MockModule) GetDataRootTupleRoot(arg0 context.Context, arg1, arg2 uint64) (*blobstream.DataRootTupleRoot, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDataRootTupleRoot", arg0, arg1, arg2) + ret0, _ := ret[0].(*blobstream.DataRootTupleRoot) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDataRootTupleRoot indicates an expected call of GetDataRootTupleRoot. +func (mr *MockModuleMockRecorder) GetDataRootTupleRoot(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataRootTupleRoot", reflect.TypeOf((*MockModule)(nil).GetDataRootTupleRoot), arg0, arg1, arg2) +} diff --git a/nodebuilder/blobstream/module.go b/nodebuilder/blobstream/module.go new file mode 100644 index 0000000000..c8deb1db10 --- /dev/null +++ b/nodebuilder/blobstream/module.go @@ -0,0 +1,12 @@ +package blobstream + +import "go.uber.org/fx" + +func ConstructModule() fx.Option { + return fx.Module("blobstream", + fx.Provide(NewService), + fx.Provide(func(serv *Service) Module { + return serv + }), + ) +} diff --git a/nodebuilder/blobstream/service.go b/nodebuilder/blobstream/service.go new file mode 100644 index 0000000000..5803e19012 --- /dev/null +++ b/nodebuilder/blobstream/service.go @@ -0,0 +1,81 @@ +package blobstream + +import ( + "context" + + logging "github.com/ipfs/go-log/v2" + + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" +) + +var _ Module = (*Service)(nil) + +var log = logging.Logger("go-blobstream") + +type Service struct { + headerServ headerServ.Module +} + +func NewService(headerMod headerServ.Module) *Service { + return &Service{ + headerServ: headerMod, + } +} + +// GetDataRootTupleRoot collects the data roots over a provided ordered range of blocks, +// and then creates a new Merkle root of those data roots. The range is end exclusive. 
+func (s *Service) GetDataRootTupleRoot(ctx context.Context, start, end uint64) (*DataRootTupleRoot, error) { + log.Debugw("validating the data commitment range", "start", start, "end", end) + err := s.validateDataRootTupleRootRange(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("fetching the data root tuples", "start", start, "end", end) + encodedDataRootTuples, err := s.fetchEncodedDataRootTuples(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("hashing the data root tuples", "start", start, "end", end) + root, err := hashDataRootTuples(encodedDataRootTuples) + if err != nil { + return nil, err + } + // Create data commitment + dataRootTupleRoot := DataRootTupleRoot(root) + return &dataRootTupleRoot, nil +} + +// GetDataRootTupleInclusionProof creates an inclusion proof for the data root of block +// height `height` in the set of blocks defined by `start` and `end`. The range +// is end exclusive. +func (s *Service) GetDataRootTupleInclusionProof( + ctx context.Context, + height, start, end uint64, +) (*DataRootTupleInclusionProof, error) { + log.Debugw( + "validating the data root inclusion proof request", + "start", + start, + "end", + end, + "height", + height, + ) + err := s.validateDataRootInclusionProofRequest(ctx, height, start, end) + if err != nil { + return nil, err + } + log.Debugw("fetching the data root tuples", "start", start, "end", end) + + encodedDataRootTuples, err := s.fetchEncodedDataRootTuples(ctx, start, end) + if err != nil { + return nil, err + } + log.Debugw("proving the data root tuples", "start", start, "end", end) + proof, err := proveDataRootTuples(encodedDataRootTuples, start, height) + if err != nil { + return nil, err + } + dataRootTupleInclusionProof := DataRootTupleInclusionProof(proof) + return &dataRootTupleInclusionProof, nil +} diff --git a/nodebuilder/blobstream/service_test.go b/nodebuilder/blobstream/service_test.go new file mode 100644 index 0000000000..77ee47efff --- /dev/null 
+++ b/nodebuilder/blobstream/service_test.go @@ -0,0 +1,187 @@ +package blobstream + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" +) + +func TestPadBytes(t *testing.T) { + tests := []struct { + input []byte + length int + expected []byte + expectErr bool + }{ + {input: []byte{1, 2, 3}, length: 5, expected: []byte{0, 0, 1, 2, 3}}, + {input: []byte{1, 2, 3}, length: 3, expected: []byte{1, 2, 3}}, + {input: []byte{1, 2, 3}, length: 2, expected: nil, expectErr: true}, + {input: []byte{}, length: 3, expected: []byte{0, 0, 0}}, + } + + for _, test := range tests { + result, err := padBytes(test.input, test.length) + if test.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + } +} + +func TestTo32PaddedHexBytes(t *testing.T) { + tests := []struct { + number uint64 + expected []byte + expectError bool + }{ + { + number: 10, + expected: func() []byte { + res, _ := hex.DecodeString("000000000000000000000000000000000000000000000000000000000000000a") + return res + }(), + }, + { + number: 255, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + return res + }(), + }, + { + number: 255, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000ff") + return res + }(), + }, + { + number: 4294967295, + expected: func() []byte { + res, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000ffffffff") + return res + }(), + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("number: %d", test.number), func(t *testing.T) { + result, err := to32PaddedHexBytes(test.number) + if test.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expected, result) + } + }) + } +} + +func 
TestEncodeDataRootTuple(t *testing.T) { + height := uint64(2) + dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") + require.NoError(t, err) + + expectedEncoding, err := hex.DecodeString( + // hex representation of height padded to 32 bytes + "0000000000000000000000000000000000000000000000000000000000000002" + + // data root + "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", + ) + require.NoError(t, err) + require.NotNil(t, expectedEncoding) + + actualEncoding, err := encodeDataRootTuple(height, *(*[32]byte)(dataRoot)) + require.NoError(t, err) + require.NotNil(t, actualEncoding) + + // Check that the length of packed data is correct + assert.Equal(t, len(actualEncoding), 64) + assert.Equal(t, expectedEncoding, actualEncoding) +} + +func TestHashDataRootTuples(t *testing.T) { + tests := map[string]struct { + tuples [][]byte + expectedHash []byte + expectErr bool + }{ + "empty tuples list": {tuples: nil, expectErr: true}, + "valid list of encoded data root tuples": { + tuples: func() [][]byte { + tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + return [][]byte{tuple1, tuple2} + }(), + expectedHash: func() []byte { + tuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + tuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + + return merkle.HashFromByteSlices([][]byte{tuple1, tuple2}) + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := hashDataRootTuples(tc.tuples) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedHash, result) + } + }) + } +} + +func TestProveDataRootTuples(t *testing.T) { + tests := map[string]struct { + tuples [][]byte + height uint64 + rangeStart uint64 + expectedProof merkle.Proof + expectErr bool + }{ + "empty tuples list": {tuples: [][]byte{{0x1}}, expectErr: true}, + "start height == 0": {tuples: [][]byte{{0x1}}, 
expectErr: true}, + "range start height == 0": {tuples: [][]byte{{0x1}}, expectErr: true}, + "valid proof": { + height: 3, + rangeStart: 1, + tuples: func() [][]byte { + encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + encodedTuple3, _ := encodeDataRootTuple(3, [32]byte{0x3}) + encodedTuple4, _ := encodeDataRootTuple(4, [32]byte{0x4}) + return [][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4} + }(), + expectedProof: func() merkle.Proof { + encodedTuple1, _ := encodeDataRootTuple(1, [32]byte{0x1}) + encodedTuple2, _ := encodeDataRootTuple(2, [32]byte{0x2}) + encodedTuple3, _ := encodeDataRootTuple(3, [32]byte{0x3}) + encodedTuple4, _ := encodeDataRootTuple(4, [32]byte{0x4}) + _, proofs := merkle.ProofsFromByteSlices([][]byte{encodedTuple1, encodedTuple2, encodedTuple3, encodedTuple4}) + return *proofs[2] + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := proveDataRootTuples(tc.tuples, tc.rangeStart, tc.height) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedProof, *result) + } + }) + } +} diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index e769cd5299..944562ee61 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -13,6 +13,9 @@ import ( modfraud "github.com/celestiaorg/celestia-node/nodebuilder/fraud" ) +// ErrHeightZero returned when the provided block height is equal to 0. +var ErrHeightZero = errors.New("height is equal to 0") + // Service represents the header Service that can be started / stopped on a node. // Service's main function is to manage its sub-services. Service can contain several // sub-services, such as Exchange, ExchangeServer, Syncer, and so forth. 
@@ -64,6 +67,9 @@ func (s *Service) GetRangeByHeight( } func (s *Service) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + if height == 0 { + return nil, ErrHeightZero + } head, err := s.syncer.Head(ctx) switch { case err != nil: diff --git a/nodebuilder/module.go b/nodebuilder/module.go index 8f196f3b1d..5a774b8b9b 100644 --- a/nodebuilder/module.go +++ b/nodebuilder/module.go @@ -8,6 +8,7 @@ import ( "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/fxutil" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/core" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" @@ -56,6 +57,7 @@ func ConstructModule(tp node.Type, network p2p.Network, cfg *Config, store Store node.ConstructModule(tp), pruner.ConstructModule(tp, &cfg.Pruner), rpc.ConstructModule(tp, &cfg.RPC), + blobstream.ConstructModule(), ) return fx.Module( diff --git a/nodebuilder/node.go b/nodebuilder/node.go index e17c9d3922..c0ba8f78e8 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -22,6 +22,7 @@ import ( "github.com/celestiaorg/celestia-node/api/gateway" "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -69,14 +70,15 @@ type Node struct { // p2p protocols PubSub *pubsub.PubSub // services - ShareServ share.Module // not optional - HeaderServ header.Module // not optional - StateServ state.Module // not optional - FraudServ fraud.Module // not optional - BlobServ blob.Module // not optional - DASer das.Module // not optional - AdminServ node.Module // not optional 
- DAMod da.Module // not optional + ShareServ share.Module // not optional + HeaderServ header.Module // not optional + StateServ state.Module // not optional + FraudServ fraud.Module // not optional + BlobServ blob.Module // not optional + DASer das.Module // not optional + AdminServ node.Module // not optional + DAMod da.Module // not optional + BlobstreamMod blobstream.Module // start and stop control ref internal fx.App lifecycle funcs to be called from Start and Stop start, stop lifecycleFunc diff --git a/nodebuilder/rpc/constructors.go b/nodebuilder/rpc/constructors.go index 6509e38b96..a686202bfd 100644 --- a/nodebuilder/rpc/constructors.go +++ b/nodebuilder/rpc/constructors.go @@ -5,6 +5,7 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc" "github.com/celestiaorg/celestia-node/nodebuilder/blob" + "github.com/celestiaorg/celestia-node/nodebuilder/blobstream" "github.com/celestiaorg/celestia-node/nodebuilder/da" "github.com/celestiaorg/celestia-node/nodebuilder/das" "github.com/celestiaorg/celestia-node/nodebuilder/fraud" @@ -26,6 +27,7 @@ func registerEndpoints( nodeMod node.Module, blobMod blob.Module, daMod da.Module, + blobstreamMod blobstream.Module, serv *rpc.Server, ) { serv.RegisterService("fraud", fraudMod, &fraud.API{}) @@ -37,6 +39,7 @@ func registerEndpoints( serv.RegisterService("node", nodeMod, &node.API{}) serv.RegisterService("blob", blobMod, &blob.API{}) serv.RegisterService("da", daMod, &da.API{}) + serv.RegisterService("blobstream", blobstreamMod, &blobstream.API{}) } func server(cfg *Config, signer jwt.Signer, verifier jwt.Verifier) *rpc.Server { diff --git a/nodebuilder/share/constructors.go b/nodebuilder/share/constructors.go index 12c6b9c628..10bec434b3 100644 --- a/nodebuilder/share/constructors.go +++ b/nodebuilder/share/constructors.go @@ -9,14 +9,15 @@ import ( "github.com/celestiaorg/celestia-app/pkg/da" + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/share" 
"github.com/celestiaorg/celestia-node/share/eds" "github.com/celestiaorg/celestia-node/share/getters" "github.com/celestiaorg/celestia-node/share/ipld" ) -func newShareModule(getter share.Getter, avail share.Availability) Module { - return &module{getter, avail} +func newShareModule(getter share.Getter, avail share.Availability, header headerServ.Module) Module { + return &module{getter, avail, header} } // ensureEmptyCARExists adds an empty EDS to the provided EDS store. diff --git a/nodebuilder/share/mocks/api.go b/nodebuilder/share/mocks/api.go index 4e21cecae0..c24a5dc771 100644 --- a/nodebuilder/share/mocks/api.go +++ b/nodebuilder/share/mocks/api.go @@ -12,6 +12,7 @@ import ( share "github.com/celestiaorg/celestia-node/share" rsmt2d "github.com/celestiaorg/rsmt2d" gomock "github.com/golang/mock/gomock" + types "github.com/tendermint/tendermint/types" ) // MockModule is a mock of Module interface. @@ -52,6 +53,22 @@ func (mr *MockModuleMockRecorder) GetEDS(arg0, arg1 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEDS", reflect.TypeOf((*MockModule)(nil).GetEDS), arg0, arg1) } +// GetRange mocks base method. +func (m *MockModule) GetRange(arg0 context.Context, arg1 uint64, arg2, arg3 int) ([][]byte, *types.ShareProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRange", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(*types.ShareProof) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetRange indicates an expected call of GetRange. +func (mr *MockModuleMockRecorder) GetRange(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRange", reflect.TypeOf((*MockModule)(nil).GetRange), arg0, arg1, arg2, arg3) +} + // GetShare mocks base method. 
func (m *MockModule) GetShare(arg0 context.Context, arg1 *header.ExtendedHeader, arg2, arg3 int) ([]byte, error) { m.ctrl.T.Helper() diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go index a8e1e1c895..a2cef51170 100644 --- a/nodebuilder/share/share.go +++ b/nodebuilder/share/share.go @@ -3,14 +3,25 @@ package share import ( "context" + "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" + headerServ "github.com/celestiaorg/celestia-node/nodebuilder/header" "github.com/celestiaorg/celestia-node/share" + "github.com/celestiaorg/celestia-node/share/eds" ) var _ Module = (*API)(nil) +// GetRangeResult wraps the return value of the GetRange endpoint +// because Json-RPC doesn't support more than two return values. +type GetRangeResult struct { + Shares []share.Share + Proof *types.ShareProof +} + // Module provides access to any data square or block share on the network. // // All Get methods provided on Module follow the following flow: @@ -40,6 +51,8 @@ type Module interface { GetSharesByNamespace( ctx context.Context, header *header.ExtendedHeader, namespace share.Namespace, ) (share.NamespacedShares, error) + // GetRange gets a list of shares and their corresponding proof. + GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) } // API is a wrapper around Module for the RPC. 
@@ -61,6 +74,11 @@ type API struct { header *header.ExtendedHeader, namespace share.Namespace, ) (share.NamespacedShares, error) `perm:"read"` + GetRange func( + ctx context.Context, + height uint64, + start, end int, + ) (*GetRangeResult, error) `perm:"read"` } } @@ -76,6 +94,10 @@ func (api *API) GetEDS(ctx context.Context, header *header.ExtendedHeader) (*rsm return api.Internal.GetEDS(ctx, header) } +func (api *API) GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) { + return api.Internal.GetRange(ctx, height, start, end) +} + func (api *API) GetSharesByNamespace( ctx context.Context, header *header.ExtendedHeader, @@ -87,8 +109,28 @@ func (api *API) GetSharesByNamespace( type module struct { share.Getter share.Availability + hs headerServ.Module } func (m module) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error { return m.Availability.SharesAvailable(ctx, header) } + +func (m module) GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) { + extendedHeader, err := m.hs.GetByHeight(ctx, height) + if err != nil { + return nil, err + } + extendedDataSquare, err := m.GetEDS(ctx, extendedHeader) + if err != nil { + return nil, err + } + proof, err := eds.ProveShares(extendedDataSquare, start, end) + if err != nil { + return nil, err + } + return &GetRangeResult{ + extendedDataSquare.FlattenedODS()[start:end], + proof, + }, nil +} diff --git a/share/eds/eds.go b/share/eds/eds.go index b8a332f275..079d7636b9 100644 --- a/share/eds/eds.go +++ b/share/eds/eds.go @@ -11,7 +11,10 @@ import ( "github.com/ipfs/go-cid" "github.com/ipld/go-car" "github.com/ipld/go-car/util" + "github.com/tendermint/tendermint/types" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" + "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/celestia-app/pkg/wrapper" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -271,3 +274,24 @@ func ReadEDS(ctx 
context.Context, r io.Reader, root share.DataHash) (eds *rsmt2d } return eds, nil } + +// ProveShares generates a share proof for a share range. +// The share range, defined by start and end, is end-exclusive. +func ProveShares(eds *rsmt2d.ExtendedDataSquare, start, end int) (*types.ShareProof, error) { + log.Debugw("proving share range", "start", start, "end", end) + + odsShares, err := shares.FromBytes(eds.FlattenedODS()) + if err != nil { + return nil, err + } + nID, err := pkgproof.ParseNamespace(odsShares, start, end) + if err != nil { + return nil, err + } + log.Debugw("generating the share proof", "start", start, "end", end) + proof, err := pkgproof.NewShareInclusionProofFromEDS(eds, nID, shares.NewRange(start, end)) + if err != nil { + return nil, err + } + return &proof, nil +} diff --git a/share/eds/eds_test.go b/share/eds/eds_test.go index b5e02fe14a..fb615fb84f 100644 --- a/share/eds/eds_test.go +++ b/share/eds/eds_test.go @@ -15,9 +15,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/rand" + coretypes "github.com/tendermint/tendermint/types" "github.com/celestiaorg/celestia-app/pkg/appconsts" "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/namespace" + pkgproof "github.com/celestiaorg/celestia-app/pkg/proof" + "github.com/celestiaorg/celestia-app/pkg/shares" "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/share" @@ -281,3 +285,66 @@ func createTestData(t *testing.T, testDir string) { //nolint:unused require.NoError(t, err, "writing example root to file") f.Close() } + +func TestProveShares(t *testing.T) { + ns := namespace.RandomBlobNamespace() + eds, dataRoot := edstest.RandEDSWithNamespace( + t, + ns.Bytes(), + 16, + ) + + tests := map[string]struct { + start, end int + expectedProof coretypes.ShareProof + expectErr bool + }{ + "start share == end share": { + start: 2, + end: 2, + expectErr: true, + }, + "start 
share > end share": { + start: 3, + end: 2, + expectErr: true, + }, + "start share > number of shares in the block": { + start: 2000, + end: 2010, + expectErr: true, + }, + "end share > number of shares in the block": { + start: 1, + end: 2010, + expectErr: true, + }, + "valid case": { + start: 0, + end: 2, + expectedProof: func() coretypes.ShareProof { + proof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + ns, + shares.NewRange(0, 2), + ) + require.NoError(t, err) + require.NoError(t, proof.Validate(dataRoot.Hash())) + return proof + }(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + result, err := ProveShares(eds, tc.start, tc.end) + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedProof, *result) + assert.NoError(t, result.Validate(dataRoot.Hash())) + } + }) + } +} diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go index 474a58b2dc..5f6dcfa6f7 100644 --- a/share/eds/edstest/testing.go +++ b/share/eds/edstest/testing.go @@ -4,8 +4,19 @@ import ( "testing" "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/celestia-app/app" + "github.com/celestiaorg/celestia-app/app/encoding" + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/pkg/da" + "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/celestiaorg/celestia-app/pkg/shares" + "github.com/celestiaorg/celestia-app/pkg/square" "github.com/celestiaorg/celestia-app/pkg/wrapper" + "github.com/celestiaorg/celestia-app/test/util/blobfactory" + "github.com/celestiaorg/celestia-app/test/util/testfactory" + "github.com/celestiaorg/celestia-app/x/blob/types" "github.com/celestiaorg/nmt" "github.com/celestiaorg/rsmt2d" @@ -45,3 +56,95 @@ func RandEDSWithNamespace( require.NoError(t, err) return eds, dah } + +// GenerateTestBlock generates a set of test blocks with a specific blob size 
and number of +// transactions +func GenerateTestBlock( + t *testing.T, + blobSize, numberOfTransactions int, +) ( + []*types.MsgPayForBlobs, + []*types.Blob, + []namespace.Namespace, + *rsmt2d.ExtendedDataSquare, + coretypes.Txs, + *da.DataAvailabilityHeader, + []byte, +) { + nss, msgs, blobs, coreTxs := createTestBlobTransactions( + t, + numberOfTransactions, + blobSize, + ) + + txs := make(coretypes.Txs, 0) + txs = append(txs, coreTxs...) + dataSquare, err := square.Construct( + txs.ToSliceOfBytes(), + appconsts.LatestVersion, + appconsts.SquareSizeUpperBound(appconsts.LatestVersion), + ) + require.NoError(t, err) + + // erasure the data square which we use to create the data root. + eds, err := da.ExtendShares(shares.ToBytes(dataSquare)) + require.NoError(t, err) + + // create the new data root by creating the data availability header (merkle + // roots of each row and col of the erasure data). + dah, err := da.NewDataAvailabilityHeader(eds) + require.NoError(t, err) + dataRoot := dah.Hash() + + return msgs, blobs, nss, eds, coreTxs, &dah, dataRoot +} + +// createTestBlobTransactions generates a set of transactions that can be added to a blob. +// The number of transactions dictates the number of PFBs that will be returned. +// The size refers to the size of the data contained in the PFBs in bytes. 
+func createTestBlobTransactions( + t *testing.T, + numberOfTransactions, size int, +) ([]namespace.Namespace, []*types.MsgPayForBlobs, []*types.Blob, []coretypes.Tx) { + acc := "blobstream-api-tests" + kr := testfactory.GenerateKeyring(acc) + signer := types.NewKeyringSigner(kr, acc, "test") + + nss := make([]namespace.Namespace, 0) + msgs := make([]*types.MsgPayForBlobs, 0) + blobs := make([]*types.Blob, 0) + coreTxs := make([]coretypes.Tx, 0) + for i := 0; i < numberOfTransactions; i++ { + ns, msg, blob, coreTx := createTestBlobTransaction(t, signer, size+i*1000) + nss = append(nss, ns) + msgs = append(msgs, msg) + blobs = append(blobs, blob) + coreTxs = append(coreTxs, coreTx) + } + + return nss, msgs, blobs, coreTxs +} + +// createTestBlobTransaction creates a test blob transaction using a specific signer and a specific +// PFB size. The size is in bytes. +func createTestBlobTransaction( + t *testing.T, + signer *types.KeyringSigner, + size int, +) (namespace.Namespace, *types.MsgPayForBlobs, *types.Blob, coretypes.Tx) { + addr, err := signer.GetSignerInfo().GetAddress() + require.NoError(t, err) + + ns := namespace.RandomBlobNamespace() + msg, blob := blobfactory.RandMsgPayForBlobsWithNamespaceAndSigner(addr.String(), ns, size) + require.NoError(t, err) + + builder := signer.NewTxBuilder() + stx, err := signer.BuildSignedTx(builder, msg) + require.NoError(t, err) + rawTx, err := encoding.MakeConfig(app.ModuleEncodingRegisters...).TxConfig.TxEncoder()(stx) + require.NoError(t, err) + cTx, err := coretypes.MarshalBlobTx(rawTx, blob) + require.NoError(t, err) + return ns, msg, blob, cTx +}