diff --git a/api/docgen/exampledata/blobProof.json b/api/docgen/exampledata/blobProof.json index 1ee87af9e3..d23f4b6aa0 100644 --- a/api/docgen/exampledata/blobProof.json +++ b/api/docgen/exampledata/blobProof.json @@ -1,31 +1,53 @@ -[ - { - "end": 8, - "nodes": [ - "/////////////////////////////////////////////////////////////////////////////wuxStDHcZ7+b5byNQMVLJbzBT3wmObsThoQ0sCTjTCP" +{ + "ShareToRowRootProof": [ + { + "start": 3, + "end": 4, + "nodes": [ + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQ72eTVOUxB9THxFjAEwtTePJQA1b0xcz2f6TJc400Uw", + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBD0CYbGYoGN4q9VfSmeGZeg/h1NDBA/jtXjZrrKRHE6", + "/////////////////////////////////////////////////////////////////////////////8KDE4JDf0N2lZB7DW1Fpasdk/wz4jHOxuBPAk5Vf5ZI" + ] + }, + { + "end": 1, + "nodes": [ + "//////////////////////////////////////7//////////////////////////////////////plEqgR/c4IAVkNdYRWOYOAESD4whneKR54Dz5Dfe4p2", + "//////////////////////////////////////7//////////////////////////////////////lrD0qJ9dspxSO1Yl8NDioZfgOm8Yj63Y+BGDRHlKCRj", + "/////////////////////////////////////////////////////////////////////////////xQyI+g89aM6rhy9rl2eKr0Uc2NPauf3fkLY3Z+gBtuM" + ] + } + ], + "RowProof": { + "row_roots": [ + "00000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000808080808080808BC517066A5A8C81E2A4353DB500EBB3410047A93D2EE8ADF0B6797B9A5519557", + "0000000000000000000000000000000000000000000808080808080808FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE015AB6AAC6FAF0ABF26F9453AF390FDA3B39EB384F0B71D0170D84CF69CBA2BC" ], - "is_max_namespace_ignored": true - }, - { - "end": 8, - "nodes": [ - "//////////////////////////////////////////////////////////////////////////////n1NeJxPU2bZUAccKZZ+LAu2Wj5ajbVYURV9ojhSKwp" + "proofs": [ + { + "total": 16, + "index": 1, + "leaf_hash": "lJek/BHnKH6PyRB8jlk69F6EY9Tfx2LRanaF74JVciU=", + "aunts": [ + "bLjvftajE6jVsgQQBkV4RUPESRc+v4bhP0Ljf36858Q=", + "QaF9mNskaURxk98S3BExB1PzRAjOqVydrDLvUu0B5/M=", + "K2xW8JJ3Ff4FvtbfZi5ZD/ygnswaNCNIKXsSzbO2Jrc=", + "uySRG/gINLAgGgywJCTiXMlFkfQivF1O1zLg5+RRUP8=" + ] + }, + { + "total": 16, + "index": 2, + "leaf_hash": "h+4ND52kT4qkc9nWW22dIMAK/4YjkC6fBoD01WF0+Uo=", + "aunts": [ + "2x8OISRBMLYJRV8NfTNtVvZUg2F7MtCK5xCZuE9fQwQ=", + "Xvr5IalE2y3pxHjxh5kcHFSRaz4g5MxdOj4NIGwRXY0=", + "K2xW8JJ3Ff4FvtbfZi5ZD/ygnswaNCNIKXsSzbO2Jrc=", + "uySRG/gINLAgGgywJCTiXMlFkfQivF1O1zLg5+RRUP8=" + ] + } ], - "is_max_namespace_ignored": true - }, - { - "end": 8, - "nodes": [ - "/////////////////////////////////////////////////////////////////////////////0xK8BKnzDmwK0HR4ZJvyB4kh3jPPXGxaGPFoga8vPxF" - ], - "is_max_namespace_ignored": true - }, - { - "end": 7, - "nodes": [ - "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAMJ/xGlNMdEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwn/EaU0x0UTO9HUGKjyjcv5U2gHeSjJ8S1rftqv6k8kxlVWW8e/7", - "/////////////////////////////////////////////////////////////////////////////wexh4khLQ9HQ2X6nh9wU5B+m6r+LWwPTEDTa5/CosDF" - ], - "is_max_namespace_ignored": true + "start_row": 1, + "end_row": 3 } -] +} diff --git a/blob/blob.go b/blob/blob.go index 9536734809..f56c78c7fa 100644 --- a/blob/blob.go +++ b/blob/blob.go @@ -6,54 +6,162 @@ import ( "errors" "fmt" + "github.com/tendermint/tendermint/crypto/merkle" + coretypes "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" - "github.com/celestiaorg/go-square/merkle" "github.com/celestiaorg/go-square/v2/inclusion" libshare 
"github.com/celestiaorg/go-square/v2/share" "github.com/celestiaorg/nmt" ) -// appVersion is the current application version of celestia-app. -const appVersion = appconsts.LatestVersion - var errEmptyShares = errors.New("empty shares") -var subtreeRootThreshold = appconsts.SubtreeRootThreshold(appVersion) +// Proof constructs the proof of a blob to the data root. +type Proof struct { + // SubtreeRoots are the subtree roots of the blob's data that are + // used to create the commitment. + SubtreeRoots [][]byte `json:"subtree_roots"` + // SubtreeRootProofs the proofs of the subtree roots to the row roots they belong to. + // If the blob spans across multiple rows, then this will contain multiple proofs. + SubtreeRootProofs []*nmt.Proof `json:"share_to_row_root_proofs"` + // RowToDataRootProof the proofs of the row roots containing the blob shares + // to the data root. + RowToDataRootProof coretypes.RowProof `json:"row_to_data_root_proof"` +} -// The Proof is a set of nmt proofs that can be verified only through -// the included method (due to limitation of the nmt https://github.com/celestiaorg/nmt/issues/218). -// Proof proves the WHOLE namespaced data to the row roots. -// TODO (@vgonkivs): rework `Proof` in order to prove a particular blob. -// https://github.com/celestiaorg/celestia-node/issues/2303 -type Proof []*nmt.Proof +// namespaceToRowRootProof a proof of a set of namespace shares to the row +// roots they belong to. +type namespaceToRowRootProof []*nmt.Proof -func (p Proof) Len() int { return len(p) } +// Commitment is a Merkle Root of the subtree built from shares of the Blob. +// It is computed by splitting the blob into shares and building the Merkle subtree to be included +// after Submit. +type Commitment []byte -// equal is a temporary method that compares two proofs. -// should be removed in BlobService V1. -func (p Proof) equal(input Proof) error { - if p.Len() != input.Len() { - return ErrInvalidProof +// Verify takes a data root and verifies if the +// provided proof's subtree roots were committed to the given data root. +func (p *Proof) Verify(dataRoot []byte) (bool, error) { + if len(dataRoot) == 0 { + return false, errors.New("root must be non-empty") } - for i, proof := range p { - pNodes := proof.Nodes() - inputNodes := input[i].Nodes() - for i, node := range pNodes { - if !bytes.Equal(node, inputNodes[i]) { - return ErrInvalidProof - } - } + subtreeRootThreshold := appconsts.SubtreeRootThreshold(appconsts.LatestVersion) + if subtreeRootThreshold <= 0 { + return false, errors.New("subtreeRootThreshold must be > 0") + } + + // this check is < instead of != because we can have two subtree roots + // at the same height, depending on the subtree root threshold, + // and they can be used to create the above inner node without needing a proof inner node. + if len(p.SubtreeRoots) < len(p.SubtreeRootProofs) { + return false, fmt.Errorf( + "the number of subtree roots %d should be bigger than the number of subtree root proofs %d", + len(p.SubtreeRoots), + len(p.SubtreeRootProofs), + ) + } + + // for each row, one or more subtree roots' inclusion is verified against + // their corresponding row root. then, these row roots' inclusion is verified + // against the data root. so their number should be the same. 
+ if len(p.SubtreeRootProofs) != len(p.RowToDataRootProof.Proofs) { + return false, fmt.Errorf( + "the number of subtree root proofs %d should be equal to the number of row root proofs %d", + len(p.SubtreeRootProofs), + len(p.RowToDataRootProof.Proofs), + ) + } + + // the row root proofs' ranges are defined as [startRow, endRow]. + if int(p.RowToDataRootProof.EndRow-p.RowToDataRootProof.StartRow+1) != len(p.RowToDataRootProof.RowRoots) { + return false, fmt.Errorf( + "the number of rows %d must equal the number of row roots %d", + int(p.RowToDataRootProof.EndRow-p.RowToDataRootProof.StartRow+1), + len(p.RowToDataRootProof.RowRoots), + ) + } + if len(p.RowToDataRootProof.Proofs) != len(p.RowToDataRootProof.RowRoots) { + return false, fmt.Errorf( + "the number of proofs %d must equal the number of row roots %d", + len(p.RowToDataRootProof.Proofs), + len(p.RowToDataRootProof.RowRoots), + ) + } + + // verify the inclusion of the rows to the data root + if err := p.RowToDataRootProof.Validate(dataRoot); err != nil { + return false, err + } + + // computes the total number of shares proven given that each subtree root + // references a specific set of leaves. + numberOfShares := 0 + for _, proof := range p.SubtreeRootProofs { + numberOfShares += proof.End() - proof.Start() + } - if proof.Start() != input[i].Start() || proof.End() != input[i].End() { - return ErrInvalidProof + // use the computed total number of shares to calculate the subtree roots + // width. + // the subtree roots width is defined in ADR-013: + // + //https://github.com/celestiaorg/celestia-app/blob/main/docs/architecture/adr-013-non-interactive-default-rules-for-zero-padding.md + subtreeRootsWidth := inclusion.SubTreeWidth(numberOfShares, subtreeRootThreshold) + + nmtHasher := nmt.NewNmtHasher(appconsts.NewBaseHashFunc(), libshare.NamespaceSize, true) + // verify the proof of the subtree roots + subtreeRootsCursor := 0 + for i, subtreeRootProof := range p.SubtreeRootProofs { + // calculate the share range that each subtree root commits to. 
+ ranges, err := nmt.ToLeafRanges(subtreeRootProof.Start(), subtreeRootProof.End(), subtreeRootsWidth) + if err != nil { + return false, err } - if !bytes.Equal(proof.LeafHash(), input[i].LeafHash()) { - return ErrInvalidProof + if len(p.SubtreeRoots) < subtreeRootsCursor { + return false, fmt.Errorf("len(commitmentProof.SubtreeRoots)=%d < subtreeRootsCursor=%d", + len(p.SubtreeRoots), subtreeRootsCursor) } + if len(p.SubtreeRoots) < subtreeRootsCursor+len(ranges) { + return false, fmt.Errorf("len(commitmentProof.SubtreeRoots)=%d < subtreeRootsCursor+len(ranges)=%d", + len(p.SubtreeRoots), subtreeRootsCursor+len(ranges)) + } + valid, err := subtreeRootProof.VerifySubtreeRootInclusion( + nmtHasher, + p.SubtreeRoots[subtreeRootsCursor:subtreeRootsCursor+len(ranges)], + subtreeRootsWidth, + p.RowToDataRootProof.RowRoots[i], + ) + if err != nil { + return false, err + } + if !valid { + return false, + fmt.Errorf( + "subtree root proof for range [%d, %d) is invalid", + subtreeRootProof.Start(), + subtreeRootProof.End(), + ) + } + subtreeRootsCursor += len(ranges) } - return nil + + return true, nil +} + +// GenerateCommitment generates the share commitment corresponding +// to the proof's subtree roots +func (p *Proof) GenerateCommitment() Commitment { + return merkle.HashFromByteSlices(p.SubtreeRoots) +} + +func (com Commitment) String() string { + return string(com) +} + +// Equal ensures that commitments are the same +func (com Commitment) Equal(c Commitment) bool { + return bytes.Equal(com, c) } // Blob represents any application-specific binary data that anyone can submit to Celestia. @@ -94,7 +202,7 @@ func NewBlob(shareVersion uint8, namespace libshare.Namespace, data, signer []by return nil, err } - com, err := inclusion.CreateCommitment(libBlob, merkle.HashFromByteSlices, subtreeRootThreshold) + com, err := inclusion.CreateCommitment(libBlob, merkle.HashFromByteSlices, appconsts.DefaultSubtreeRootThreshold) if err != nil { return nil, err } @@ -177,3 +285,14 @@ func (b *Blob) UnmarshalJSON(data []byte) error { *b = *blob return nil } + +func (b *Blob) ComputeSubtreeRoots() ([][]byte, error) { + return inclusion.GenerateSubtreeRoots(b.Blob, appconsts.DefaultSubtreeRootThreshold) +} + +// proveRowRootsToDataRoot creates a set of binary merkle proofs for all the +// roots defined by the range [start, end). 
+func proveRowRootsToDataRoot(roots [][]byte, start, end int) []*merkle.Proof { + _, proofs := merkle.ProofsFromByteSlices(roots) + return proofs[start:end] +} diff --git a/blob/blob_fuzz_test.go b/blob/blob_fuzz_test.go index 1927ff3d46..1a9fd5f330 100644 --- a/blob/blob_fuzz_test.go +++ b/blob/blob_fuzz_test.go @@ -41,9 +41,9 @@ func FuzzProofEqual(f *testing.F) { } type verifyCorpus struct { - CP *CommitmentProof `json:"commitment_proof"` - Root []byte `json:"root"` - SThreshold int `json:"sub_threshold"` + Proof *Proof `json:"proof"` + Root []byte `json:"root"` + SThreshold int `json:"sub_threshold"` } func FuzzCommitmentProofVerify(f *testing.F) { @@ -83,10 +83,10 @@ func FuzzCommitmentProofVerify(f *testing.F) { if err := json.Unmarshal(valueJSON, val); err != nil { return } - commitProof := val.CP + commitProof := val.Proof if commitProof == nil { return } - _, _ = commitProof.Verify(val.Root, val.SThreshold) + _, _ = commitProof.Verify(val.Root) }) } diff --git a/blob/blob_test.go b/blob/blob_test.go index 221e2d8395..af4290997f 100644 --- a/blob/blob_test.go +++ b/blob/blob_test.go @@ -6,8 +6,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" - "github.com/celestiaorg/go-square/merkle" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/go-square/v2/inclusion" libshare "github.com/celestiaorg/go-square/v2/share" ) @@ -28,7 +29,7 @@ func TestBlob(t *testing.T) { expectedRes: func(t *testing.T) { require.NotEmpty(t, blob) require.NotEmpty(t, blob[0].Namespace()) - require.NotEmpty(t, blob[0].Data) + require.NotEmpty(t, blob[0].Data()) require.NotEmpty(t, blob[0].Commitment) }, }, @@ -38,7 +39,7 @@ func TestBlob(t *testing.T) { comm, err := inclusion.CreateCommitment( blob[0].Blob, merkle.HashFromByteSlices, - subtreeRootThreshold, + appconsts.DefaultSubtreeRootThreshold, ) require.NoError(t, err) assert.Equal(t, blob[0].Commitment, Commitment(comm)) diff --git a/blob/commitment_proof.go b/blob/commitment_proof.go deleted file mode 100644 index 887f420552..0000000000 --- a/blob/commitment_proof.go +++ /dev/null @@ -1,155 +0,0 @@ -package blob - -import ( - "bytes" - "errors" - "fmt" - - "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" - "github.com/celestiaorg/celestia-app/v3/pkg/proof" - "github.com/celestiaorg/go-square/v2/inclusion" - libshare "github.com/celestiaorg/go-square/v2/share" - "github.com/celestiaorg/nmt" - "github.com/celestiaorg/nmt/namespace" -) - -// Commitment is a Merkle Root of the subtree built from shares of the Blob. -// It is computed by splitting the blob into shares and building the Merkle subtree to be included -// after Submit. -type Commitment []byte - -// CommitmentProof is an inclusion proof of a commitment to the data root. -type CommitmentProof struct { - // SubtreeRoots are the subtree roots of the blob's data that are - // used to create the commitment. - SubtreeRoots [][]byte `json:"subtree_roots"` - // SubtreeRootProofs are the NMT proofs for the subtree roots - // to the row roots. - SubtreeRootProofs []*nmt.Proof `json:"subtree_root_proofs"` - // NamespaceID is the namespace id of the commitment being proven. This - // namespace id is used when verifying the proof. If the namespace id doesn't - // match the namespace of the shares, the proof will fail verification. - NamespaceID namespace.ID `json:"namespace_id"` - // RowProof is the proof of the rows containing the blob's data to the - // data root. 
- RowProof proof.RowProof `json:"row_proof"` - NamespaceVersion uint8 `json:"namespace_version"` -} - -func (com Commitment) String() string { - return string(com) -} - -// Equal ensures that commitments are the same -func (com Commitment) Equal(c Commitment) bool { - return bytes.Equal(com, c) -} - -// Validate performs basic validation to the commitment proof. -// Note: it doesn't verify if the proof is valid or not. -// Check Verify() for that. -func (commitmentProof *CommitmentProof) Validate() error { - if len(commitmentProof.SubtreeRoots) < len(commitmentProof.SubtreeRootProofs) { - return fmt.Errorf( - "the number of subtree roots %d should be bigger than the number of subtree root proofs %d", - len(commitmentProof.SubtreeRoots), - len(commitmentProof.SubtreeRootProofs), - ) - } - if len(commitmentProof.SubtreeRootProofs) != len(commitmentProof.RowProof.Proofs) { - return fmt.Errorf( - "the number of subtree root proofs %d should be equal to the number of row root proofs %d", - len(commitmentProof.SubtreeRootProofs), - len(commitmentProof.RowProof.Proofs), - ) - } - if int(commitmentProof.RowProof.EndRow-commitmentProof.RowProof.StartRow+1) != len(commitmentProof.RowProof.RowRoots) { - return fmt.Errorf( - "the number of rows %d must equal the number of row roots %d", - int(commitmentProof.RowProof.EndRow-commitmentProof.RowProof.StartRow+1), - len(commitmentProof.RowProof.RowRoots), - ) - } - if len(commitmentProof.RowProof.Proofs) != len(commitmentProof.RowProof.RowRoots) { - return fmt.Errorf( - "the number of proofs %d must equal the number of row roots %d", - len(commitmentProof.RowProof.Proofs), - len(commitmentProof.RowProof.RowRoots), - ) - } - return nil -} - -// Verify verifies that a commitment proof is valid, i.e., the subtree roots commit -// to some data that was posted to a square. -// Expects the commitment proof to be properly formulated and validated -// using the Validate() function. -func (commitmentProof *CommitmentProof) Verify(root []byte, subtreeRootThreshold int) (bool, error) { - if len(root) == 0 { - return false, errors.New("root must be non-empty") - } - - rp := commitmentProof.RowProof - if err := rp.Validate(root); err != nil { - return false, err - } - - nmtHasher := nmt.NewNmtHasher(appconsts.NewBaseHashFunc(), libshare.NamespaceSize, true) - - // computes the total number of shares proven. - numberOfShares := 0 - for _, proof := range commitmentProof.SubtreeRootProofs { - numberOfShares += proof.End() - proof.Start() - } - - if subtreeRootThreshold <= 0 { - return false, errors.New("subtreeRootThreshould must be > 0") - } - - // use the computed total number of shares to calculate the subtree roots - // width. - // the subtree roots width is defined in ADR-013: - // - //https://github.com/celestiaorg/celestia-app/blob/main/docs/architecture/adr-013-non-interactive-default-rules-for-zero-padding.md - subtreeRootsWidth := inclusion.SubTreeWidth(numberOfShares, subtreeRootThreshold) - - // verify the proof of the subtree roots - subtreeRootsCursor := 0 - for i, subtreeRootProof := range commitmentProof.SubtreeRootProofs { - // calculate the share range that each subtree root commits to. 
- ranges, err := nmt.ToLeafRanges(subtreeRootProof.Start(), subtreeRootProof.End(), subtreeRootsWidth) - if err != nil { - return false, err - } - - if len(commitmentProof.SubtreeRoots) < subtreeRootsCursor { - return false, fmt.Errorf("len(commitmentProof.SubtreeRoots)=%d < subtreeRootsCursor=%d", - len(commitmentProof.SubtreeRoots), subtreeRootsCursor) - } - if len(commitmentProof.SubtreeRoots) < subtreeRootsCursor+len(ranges) { - return false, fmt.Errorf("len(commitmentProof.SubtreeRoots)=%d < subtreeRootsCursor+len(ranges)=%d", - len(commitmentProof.SubtreeRoots), subtreeRootsCursor+len(ranges)) - } - valid, err := subtreeRootProof.VerifySubtreeRootInclusion( - nmtHasher, - commitmentProof.SubtreeRoots[subtreeRootsCursor:subtreeRootsCursor+len(ranges)], - subtreeRootsWidth, - commitmentProof.RowProof.RowRoots[i], - ) - if err != nil { - return false, err - } - if !valid { - return false, - fmt.Errorf( - "subtree root proof for range [%d, %d) is invalid", - subtreeRootProof.Start(), - subtreeRootProof.End(), - ) - } - subtreeRootsCursor += len(ranges) - } - - // verify row roots to data root proof - return commitmentProof.RowProof.VerifyProof(root), nil -} diff --git a/blob/parser.go b/blob/parser.go index 977bb2dfd9..5154334f87 100644 --- a/blob/parser.go +++ b/blob/parser.go @@ -4,7 +4,9 @@ import ( "errors" "fmt" - "github.com/celestiaorg/go-square/merkle" + "github.com/tendermint/tendermint/crypto/merkle" + + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/go-square/v2/inclusion" libshare "github.com/celestiaorg/go-square/v2/share" ) @@ -84,7 +86,7 @@ func (p *parser) parse() (*Blob, error) { return nil, errors.New("unexpected amount of blobs during parsing") } - com, err := inclusion.CreateCommitment(blobs[0], merkle.HashFromByteSlices, subtreeRootThreshold) + com, err := inclusion.CreateCommitment(blobs[0], merkle.HashFromByteSlices, appconsts.DefaultSubtreeRootThreshold) if err != nil { return nil, err } diff --git a/blob/repro_test.go b/blob/repro_test.go index 6cb39948ac..c3cd468e89 100644 --- a/blob/repro_test.go +++ b/blob/repro_test.go @@ -3,32 +3,34 @@ package blob import ( "testing" - "github.com/celestiaorg/celestia-app/v3/pkg/proof" + "github.com/tendermint/tendermint/crypto/merkle" + coretypes "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/nmt" "github.com/celestiaorg/nmt/pb" ) // Reported at https://github.com/celestiaorg/celestia-node/issues/3731. -func TestCommitmentProofRowProofVerifyWithEmptyRoot(t *testing.T) { - cp := &CommitmentProof{ - RowProof: proof.RowProof{ - Proofs: []*proof.Proof{{}}, +func TestProofRowProofVerifyWithEmptyRoot(t *testing.T) { + cp := &Proof{ + RowToDataRootProof: coretypes.RowProof{ + Proofs: []*merkle.Proof{{}}, }, } root := []byte{0xd3, 0x4d, 0x34} - if _, err := cp.Verify(root, 1); err == nil { + if _, err := cp.Verify(root); err == nil { t.Fatal("expected a non-nil error") } } // Reported at https://github.com/celestiaorg/celestia-node/issues/3730. 
-func TestCommitmentProofRowProofVerify(t *testing.T) { - cp := &CommitmentProof{ - RowProof: proof.RowProof{ - Proofs: []*proof.Proof{{}}, +func TestProofRowProofVerify(t *testing.T) { + cp := &Proof{ + RowToDataRootProof: coretypes.RowProof{ + Proofs: []*merkle.Proof{{}}, }, } - if _, err := cp.Verify(nil, 1); err == nil { + if _, err := cp.Verify(nil); err == nil { t.Fatal("expected a non-nil error") } } @@ -36,20 +38,12 @@ func TestCommitmentProofRowProofVerify(t *testing.T) { // Reported at https://github.com/celestiaorg/celestia-node/issues/3729. func TestCommitmentProofVerifySliceBound(t *testing.T) { proof := nmt.ProtoToProof(pb.Proof{End: 1}) - cp := &CommitmentProof{ + cp := &Proof{ SubtreeRootProofs: []*nmt.Proof{ &proof, }, } - if _, err := cp.Verify(nil, 1); err == nil { - t.Fatal("expected a non-nil error") - } -} - -// Reported at https://github.com/celestiaorg/celestia-node/issues/3728. -func TestCommitmentProofVerifyZeroSubThreshold(t *testing.T) { - cp := new(CommitmentProof) - if _, err := cp.Verify(nil, 0); err == nil { + if _, err := cp.Verify(nil); err == nil { t.Fatal("expected a non-nil error") } } diff --git a/blob/service.go b/blob/service.go index 11449da2b5..5b414f4f00 100644 --- a/blob/service.go +++ b/blob/service.go @@ -1,7 +1,6 @@ package blob import ( - "bytes" "context" "encoding/hex" "errors" @@ -11,6 +10,8 @@ import ( "github.com/cosmos/cosmos-sdk/types" logging "github.com/ipfs/go-log/v2" + "github.com/tendermint/tendermint/libs/bytes" + core "github.com/tendermint/tendermint/types" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -21,7 +22,6 @@ import ( "github.com/celestiaorg/go-square/v2/inclusion" libshare "github.com/celestiaorg/go-square/v2/share" "github.com/celestiaorg/nmt" - "github.com/celestiaorg/rsmt2d" "github.com/celestiaorg/celestia-node/header" "github.com/celestiaorg/celestia-node/libs/utils" @@ -31,8 +31,9 @@ import ( ) var ( - ErrBlobNotFound = errors.New("blob: not found") - ErrInvalidProof = errors.New("blob: invalid proof") + ErrBlobNotFound = errors.New("blob: not found") + ErrInvalidProof = errors.New("blob: invalid proof") + ErrMismatchCommitment = errors.New("blob: mismatched commitment") log = logging.Logger("blob") tracer = otel.Tracer("blob/service") @@ -235,7 +236,7 @@ func (s *Service) GetProof( return blob.compareCommitments(commitment) }} - _, proof, err = s.retrieve(ctx, height, namespace, sharesParser) + _, proof, err = s.retrieveBlobProof(ctx, height, namespace, sharesParser) return proof, err } @@ -296,6 +297,8 @@ func (s *Service) getAll( // To ensure that blob was included in a specific height, we need: // 1. verify the provided commitment by recomputing it; // 2. verify the provided Proof against subtree roots that were used in 1.; +// Note: this method can be deprecated because it's doing processing that can +// be done locally. func (s *Service) Included( ctx context.Context, height uint64, @@ -311,21 +314,20 @@ func (s *Service) Included( attribute.Int64("height", int64(height)), attribute.String("namespace", namespace.String()), ) - - // In the current implementation, LNs will have to download all shares to recompute the commitment. - // TODO(@vgonkivs): rework the implementation to perform all verification without network requests. 
- sharesParser := &parser{verifyFn: func(blob *Blob) bool { - return blob.compareCommitments(commitment) - }} - _, resProof, err := s.retrieve(ctx, height, namespace, sharesParser) - switch { - case err == nil: - case errors.Is(err, ErrBlobNotFound): - return false, nil - default: + // verify that the blob subtree roots match the proof subtree roots + if proofCommitment := proof.GenerateCommitment(); !commitment.Equal(proofCommitment) { + return false, fmt.Errorf( + "%w: unequal blob commitment %s and proof commitment %s", + ErrInvalidProof, + hex.EncodeToString(commitment), + hex.EncodeToString(proofCommitment), + ) + } + header, err := s.headerGetter(ctx, height) + if err != nil { return false, err } - return true, resProof.equal(*proof) + return proof.Verify(header.DataHash) } // retrieve retrieves blobs and their proofs by requesting the whole namespace and @@ -336,7 +338,7 @@ func (s *Service) retrieve( height uint64, namespace libshare.Namespace, sharesParser *parser, -) (_ *Blob, _ *Proof, err error) { +) (_ *Blob, _ *namespaceToRowRootProof, err error) { log.Infow("requesting blob", "height", height, "namespace", namespace.String()) @@ -350,7 +352,7 @@ func (s *Service) retrieve( } headerGetterSpan.SetStatus(codes.Ok, "") - headerGetterSpan.AddEvent("received eds", trace.WithAttributes( + headerGetterSpan.AddEvent("received header", trace.WithAttributes( attribute.Int64("eds-size", int64(len(header.DAH.RowRoots))))) rowIndex := -1 @@ -365,6 +367,9 @@ func (s *Service) retrieve( } } + // Note: there is no need to check whether the row index is different from -1 + // because it will be handled at the end to return the correct error. + getCtx, getSharesSpan := tracer.Start(ctx, "get-shares-by-namespace") // collect shares for the requested namespace @@ -383,7 +388,7 @@ func (s *Service) retrieve( var ( appShares = make([]libshare.Share, 0) - proofs = make(Proof, 0) + proofs = make(namespaceToRowRootProof, 0) ) for _, row := range namespacedShares { @@ -468,217 +473,252 @@ func (s *Service) retrieve( return nil, nil, err } -// getBlobs retrieves the DAH and fetches all shares from the requested Namespace and converts -// them to Blobs. -func (s *Service) getBlobs( - ctx context.Context, - namespace libshare.Namespace, - header *header.ExtendedHeader, -) (_ []*Blob, err error) { - ctx, span := tracer.Start(ctx, "get-blobs") - span.SetAttributes( - attribute.Int64("height", int64(header.Height())), - attribute.String("namespace", namespace.String()), - ) - defer func() { - utils.SetStatusAndEnd(span, err) - }() - - blobs := make([]*Blob, 0) - verifyFn := func(blob *Blob) bool { - blobs = append(blobs, blob) - return false - } - sharesParser := &parser{verifyFn: verifyFn} - - _, _, err = s.retrieve(ctx, header.Height(), namespace, sharesParser) - return blobs, err -} - -func (s *Service) GetCommitmentProof( +// retrieve retrieves blobs and their proofs by requesting the whole namespace and +// comparing Commitments. +// Retrieving is stopped once the `verify` condition in shareParser is met. 
+func (s *Service) retrieveBlobProof( ctx context.Context, height uint64, namespace libshare.Namespace, - shareCommitment []byte, -) (*CommitmentProof, error) { - log.Debugw("proving share commitment", "height", height, "commitment", shareCommitment, "namespace", namespace) - if height == 0 { - return nil, fmt.Errorf("height cannot be equal to 0") - } + sharesParser *parser, +) (_ *Blob, _ *Proof, err error) { + log.Infow("requesting blob proof", + "height", height, + "namespace", namespace.String()) - // get the blob to compute the subtree roots - log.Debugw( - "getting the blob", - "height", - height, - "commitment", - shareCommitment, - "namespace", - namespace, - ) - blb, err := s.Get(ctx, height, namespace, shareCommitment) - if err != nil { - return nil, err - } + getCtx, headerGetterSpan := tracer.Start(ctx, "header-getter") - log.Debugw( - "converting the blob to shares", - "height", - height, - "commitment", - shareCommitment, - "namespace", - namespace, - ) - blobShares, err := BlobsToShares(blb) + header, err := s.headerGetter(getCtx, height) if err != nil { - return nil, err - } - if len(blobShares) == 0 { - return nil, fmt.Errorf("the blob shares for commitment %s are empty", hex.EncodeToString(shareCommitment)) + headerGetterSpan.SetStatus(codes.Error, err.Error()) + return nil, nil, err } - // get the extended header - log.Debugw( - "getting the extended header", - "height", - height, - ) - extendedHeader, err := s.headerGetter(ctx, height) - if err != nil { - return nil, err + // find the index of the row where the blob could start + inclusiveNamespaceStartRowIndex := -1 + for i, row := range header.DAH.RowRoots { + outside, err := share.IsOutsideRange(namespace, row, row) + if err != nil { + return nil, nil, err + } + if !outside { + inclusiveNamespaceStartRowIndex = i + break + } } - - log.Debugw("getting eds", "height", height) - eds, err := s.shareGetter.GetEDS(ctx, extendedHeader) - if err != nil { - return nil, err + if inclusiveNamespaceStartRowIndex == -1 { + return nil, nil, ErrBlobNotFound } - return ProveCommitment(eds, namespace, blobShares) -} + // end exclusive index of the row root containing the namespace + exclusiveNamespaceEndRowIndex := inclusiveNamespaceStartRowIndex + for _, row := range header.DAH.RowRoots[inclusiveNamespaceStartRowIndex:] { + outside, err := share.IsOutsideRange(namespace, row, row) + if err != nil { + return nil, nil, err + } -func ProveCommitment( - eds *rsmt2d.ExtendedDataSquare, - namespace libshare.Namespace, - blobShares []libshare.Share, -) (*CommitmentProof, error) { - // find the blob shares in the EDS - blobSharesStartIndex := -1 - for index, share := range eds.FlattenedODS() { - if bytes.Equal(share, blobShares[0].ToBytes()) { - blobSharesStartIndex = index + if outside { + break } + exclusiveNamespaceEndRowIndex++ } - if blobSharesStartIndex < 0 { - return nil, fmt.Errorf("couldn't find the blob shares in the ODS") + if exclusiveNamespaceEndRowIndex == inclusiveNamespaceStartRowIndex { + return nil, nil, fmt.Errorf("couldn't find the row index of the namespace end") } - log.Debugw( - "generating the blob share proof for commitment", - "start_share", - blobSharesStartIndex, - "end_share", - blobSharesStartIndex+len(blobShares), - ) - sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( - eds, - namespace, - libshare.NewRange(blobSharesStartIndex, blobSharesStartIndex+len(blobShares)), - ) + eds, err := s.shareGetter.GetEDS(ctx, header) if err != nil { - return nil, err + headerGetterSpan.SetStatus(codes.Error, 
err.Error()) + return nil, nil, err } + headerGetterSpan.SetStatus(codes.Ok, "") + headerGetterSpan.AddEvent("received eds", trace.WithAttributes( + attribute.Int64("eds-size", int64(len(header.DAH.RowRoots))))) - // convert the shares to row root proofs to nmt proofs - nmtProofs := make([]*nmt.Proof, 0) - for _, proof := range sharesProof.ShareProofs { - nmtProof := nmt.NewInclusionProof( - int(proof.Start), - int(proof.End), - proof.Nodes, - true, - ) - nmtProofs = append( - nmtProofs, - &nmtProof, - ) - } + // calculate the square size + squareSize := len(header.DAH.RowRoots) / 2 - // compute the subtree roots of the blob shares - log.Debugw("computing the subtree roots") - subtreeRoots := make([][]byte, 0) - dataCursor := 0 - for _, proof := range nmtProofs { - // TODO: do we want directly use the default subtree root threshold - // or want to allow specifying which version to use? - ranges, err := nmt.ToLeafRanges( - proof.Start(), - proof.End(), - inclusion.SubTreeWidth(len(blobShares), subtreeRootThreshold), - ) + // get all the shares of the rows containing the namespace + _, getSharesSpan := tracer.Start(ctx, "get-all-shares-in-namespace") + // store the ODS shares of the rows containing the blob + odsShares := make([][]byte, 0, (exclusiveNamespaceEndRowIndex-inclusiveNamespaceStartRowIndex)*squareSize) + // store the EDS shares of the rows containing the blob + edsShares := make([][]libshare.Share, exclusiveNamespaceEndRowIndex-inclusiveNamespaceStartRowIndex) + + for rowIndex := inclusiveNamespaceStartRowIndex; rowIndex < exclusiveNamespaceEndRowIndex; rowIndex++ { + rowShares := eds.Row(uint(rowIndex)) + odsShares = append(odsShares, rowShares[:squareSize]...) + rowAppShares, err := libshare.FromBytes(rowShares) if err != nil { - return nil, err + return nil, nil, err } - roots, err := computeSubtreeRoots( - blobShares[dataCursor:dataCursor+proof.End()-proof.Start()], - ranges, - proof.Start(), - ) + edsShares[rowIndex-inclusiveNamespaceStartRowIndex] = rowAppShares + } + + getSharesSpan.SetStatus(codes.Ok, "") + getSharesSpan.AddEvent("received shares", trace.WithAttributes( + attribute.Int64("eds-size", int64(squareSize*2)))) + + // go over the shares until finding the requested blobs + for currentShareIndex := 0; currentShareIndex < len(odsShares); { + currentShareApp, err := libshare.NewShare(odsShares[currentShareIndex]) if err != nil { - return nil, err + return nil, nil, err } - subtreeRoots = append(subtreeRoots, roots...) - dataCursor += proof.End() - proof.Start() - } - log.Debugw("successfully proved the share commitment") - commitmentProof := CommitmentProof{ - SubtreeRoots: subtreeRoots, - SubtreeRootProofs: nmtProofs, - NamespaceID: namespace.ID(), - RowProof: *sharesProof.RowProof, - NamespaceVersion: namespace.Version(), + // skip if it's a padding share + if currentShareApp.IsPadding() { + currentShareIndex++ + continue + } + if currentShareApp.IsCompactShare() { + currentShareIndex++ + continue + } + if currentShareApp.IsSequenceStart() { + // calculate the blob length + blobLen := libshare.SparseSharesNeeded(currentShareApp.SequenceLen()) + + exclusiveEndShareIndex := currentShareIndex + blobLen + if exclusiveEndShareIndex > len(odsShares) { + // this blob spans to the next row which has a namespace > requested namespace. + // this means that we can stop. 
+ return nil, nil, ErrBlobNotFound + } + // convert the blob shares to app shares + blobShares := odsShares[currentShareIndex:exclusiveEndShareIndex] + appBlobShares, err := libshare.FromBytes(blobShares) + if err != nil { + return nil, nil, err + } + + // parse the blob + sharesParser.length = blobLen + _, isComplete := sharesParser.addShares(appBlobShares) + if !isComplete { + return nil, nil, fmt.Errorf("expected the shares to construct a full blob") + } + blob, err := sharesParser.parse() + if err != nil { + return nil, nil, err + } + + // number of shares per EDS row + numberOfSharesPerEDSRow := squareSize * 2 + // number of shares from square start to namespace start + sharesFromSquareStartToNsStart := inclusiveNamespaceStartRowIndex * numberOfSharesPerEDSRow + // number of rows from namespace start row to current row + rowsFromNsStartToCurrentRow := currentShareIndex / squareSize + // number of shares from namespace row start to current row + sharesFromNsStartToCurrentRow := rowsFromNsStartToCurrentRow * numberOfSharesPerEDSRow + // number of shares from the beginning of current row to current share + sharesFromCurrentRowStart := currentShareIndex % squareSize + // setting the index manually since we didn't use the parser.set() method + blob.index = sharesFromSquareStartToNsStart + + sharesFromNsStartToCurrentRow + + sharesFromCurrentRowStart + + if blob.Namespace().Equals(namespace) && sharesParser.verify(blob) { + // now that we found the requested blob, we will create + // its inclusion proof. + inclusiveBlobStartRowIndex := blob.index / (squareSize * 2) + exclusiveBlobEndRowIndex := inclusiveNamespaceStartRowIndex + exclusiveEndShareIndex/squareSize + if (currentShareIndex+blobLen)%squareSize != 0 { + // if the row is not complete with the blob shares, + // then we increment the exclusive blob end row index + // so that it's exclusive. 
+ exclusiveBlobEndRowIndex++ + } + + // create the row roots to data root inclusion proof + rowProofs := proveRowRootsToDataRoot( + append(header.DAH.RowRoots, header.DAH.ColumnRoots...), + inclusiveBlobStartRowIndex, + exclusiveBlobEndRowIndex, + ) + rowRoots := make([]bytes.HexBytes, exclusiveBlobEndRowIndex-inclusiveBlobStartRowIndex) + for index, rowRoot := range header.DAH.RowRoots[inclusiveBlobStartRowIndex:exclusiveBlobEndRowIndex] { + rowRoots[index] = rowRoot + } + + edsShareStart := inclusiveBlobStartRowIndex - inclusiveNamespaceStartRowIndex + edsShareEnd := exclusiveBlobEndRowIndex - inclusiveNamespaceStartRowIndex + // create the share to row root proofs + shareToRowRootProofs, _, err := pkgproof.CreateShareToRowRootProofs( + squareSize, + edsShares[edsShareStart:edsShareEnd], + header.DAH.RowRoots[inclusiveBlobStartRowIndex:exclusiveBlobEndRowIndex], + currentShareIndex%squareSize, + (exclusiveEndShareIndex-1)%squareSize, + ) + if err != nil { + return nil, nil, err + } + + // convert the share to row root proof to an nmt.Proof + nmtShareToRowRootProofs := toNMTProof(shareToRowRootProofs) + + subtreeRoots, err := inclusion.GenerateSubtreeRoots(blob.Blob, appconsts.DefaultSubtreeRootThreshold) + if err != nil { + return nil, nil, err + } + + proof := Proof{ + SubtreeRootProofs: nmtShareToRowRootProofs, + RowToDataRootProof: core.RowProof{ + RowRoots: rowRoots, + Proofs: rowProofs, + StartRow: uint32(inclusiveBlobStartRowIndex), + EndRow: uint32(exclusiveBlobEndRowIndex) - 1, + }, + SubtreeRoots: subtreeRoots, + } + return blob, &proof, nil + } + sharesParser.reset() + currentShareIndex += blobLen + } else { + // this is a continuation of a previous blob + // we can skip + currentShareIndex++ + } } - return &commitmentProof, nil + return nil, nil, ErrBlobNotFound } -// computeSubtreeRoots takes a set of shares and ranges and returns the corresponding subtree roots. -// the offset is the number of shares that are before the subtree roots we're calculating. -func computeSubtreeRoots(shares []libshare.Share, ranges []nmt.LeafRange, offset int) ([][]byte, error) { - if len(shares) == 0 { - return nil, fmt.Errorf("cannot compute subtree roots for an empty shares list") - } - if len(ranges) == 0 { - return nil, fmt.Errorf("cannot compute subtree roots for an empty ranges list") - } - if offset < 0 { - return nil, fmt.Errorf("the offset %d cannot be stricly negative", offset) +func toNMTProof(proofs []*pkgproof.NMTProof) []*nmt.Proof { + nmtShareToRowRootProofs := make([]*nmt.Proof, 0, len(proofs)) + for _, proof := range proofs { + nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), proof.Nodes, true) + nmtShareToRowRootProofs = append(nmtShareToRowRootProofs, &nmtProof) } + return nmtShareToRowRootProofs +} - // create a tree containing the shares to generate their subtree roots - tree := nmt.New( - appconsts.NewBaseHashFunc(), - nmt.IgnoreMaxNamespace(true), - nmt.NamespaceIDSize(libshare.NamespaceSize), +// getBlobs retrieves the DAH and fetches all shares from the requested Namespace and converts +// them to Blobs. 
+func (s *Service) getBlobs( + ctx context.Context, + namespace libshare.Namespace, + header *header.ExtendedHeader, +) (_ []*Blob, err error) { + ctx, span := tracer.Start(ctx, "get-blobs") + span.SetAttributes( + attribute.Int64("height", int64(header.Height())), + attribute.String("namespace", namespace.String()), ) - for _, sh := range shares { - leafData := make([]byte, 0) - leafData = append(append(leafData, sh.Namespace().Bytes()...), sh.ToBytes()...) - err := tree.Push(leafData) - if err != nil { - return nil, err - } - } + defer func() { + utils.SetStatusAndEnd(span, err) + }() - // generate the subtree roots - subtreeRoots := make([][]byte, 0) - for _, rg := range ranges { - root, err := tree.ComputeSubtreeRoot(rg.Start-offset, rg.End-offset) - if err != nil { - return nil, err - } - subtreeRoots = append(subtreeRoots, root) + blobs := make([]*Blob, 0) + verifyFn := func(blob *Blob) bool { + blobs = append(blobs, blob) + return false } - return subtreeRoots, nil + sharesParser := &parser{verifyFn: verifyFn} + + _, _, err = s.retrieve(ctx, header.Height(), namespace, sharesParser) + return blobs, err } diff --git a/blob/service_test.go b/blob/service_test.go index 2cbfcf7b03..28777de70e 100644 --- a/blob/service_test.go +++ b/blob/service_test.go @@ -17,13 +17,20 @@ import ( ds_sync "github.com/ipfs/go-datastore/sync" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/merkle" + bytes2 "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" + coretypes "github.com/tendermint/tendermint/types" + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/app/encoding" "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" pkgproof "github.com/celestiaorg/celestia-app/v3/pkg/proof" + "github.com/celestiaorg/celestia-app/v3/pkg/user" "github.com/celestiaorg/celestia-app/v3/pkg/wrapper" + "github.com/celestiaorg/celestia-app/v3/test/util/testfactory" "github.com/celestiaorg/go-header/store" - "github.com/celestiaorg/go-square/merkle" + libsquare "github.com/celestiaorg/go-square/v2" "github.com/celestiaorg/go-square/v2/inclusion" libshare "github.com/celestiaorg/go-square/v2/share" "github.com/celestiaorg/nmt" @@ -234,24 +241,13 @@ func TestBlobService_Get(t *testing.T) { proof, ok := res.(*Proof) assert.True(t, ok) - verifyFn := func(t *testing.T, rawShares [][]byte, proof *Proof, namespace libshare.Namespace) { - for _, row := range header.DAH.RowRoots { - to := 0 - for _, p := range *proof { - from := to - to = p.End() - p.Start() + from - eq := p.VerifyInclusion(share.NewSHA256Hasher(), namespace.Bytes(), rawShares[from:to], row) - if eq == true { - return - } - } - } - t.Fatal("could not prove the shares") + verifyFn := func(t *testing.T, blob *Blob, proof *Proof) { + valid, err := proof.Verify(header.DataHash) + require.NoError(t, err) + require.True(t, valid) } - rawShares, err := BlobsToShares(blobsWithDiffNamespaces[1]) - require.NoError(t, err) - verifyFn(t, libshare.ToBytes(rawShares), proof, blobsWithDiffNamespaces[1].Namespace()) + verifyFn(t, blobsWithDiffNamespaces[1], proof) }, }, { @@ -294,26 +290,41 @@ func TestBlobService_Get(t *testing.T) { require.ErrorIs(t, err, ErrInvalidProof) included, ok := res.(bool) require.True(t, ok) - require.True(t, included) + require.False(t, included) }, }, { name: "not included", doFn: func() (interface{}, error) { - libBlob, err := libshare.GenerateV0Blobs([]int{10}, false) - require.NoError(t, err) 
- blob, err := convertBlobs(libBlob...) - require.NoError(t, err) - proof, err := service.GetProof(ctx, 1, blobsWithDiffNamespaces[1].Namespace(), blobsWithDiffNamespaces[1].Commitment, ) require.NoError(t, err) - return service.Included(ctx, 1, blob[0].Namespace(), proof, blob[0].Commitment) + + // tamper with the header getter to get a random data hash at height 12345 + tamperedService := *service + tamperedService.headerGetter = func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { + if height == 12345 { + return &header.ExtendedHeader{ + RawHeader: header.RawHeader{ + DataHash: []byte{0x01}, + }, + }, nil + } + return service.headerGetter(ctx, height) + } + // this blob was included in height 1, but we will check if it's included in height 2 + return tamperedService.Included( + ctx, + 12345, + blobsWithDiffNamespaces[1].Namespace(), + proof, + blobsWithDiffNamespaces[1].Commitment, + ) }, expectedResult: func(res interface{}, err error) { - require.NoError(t, err) + require.Error(t, err) included, ok := res.(bool) require.True(t, ok) require.False(t, included) @@ -349,7 +360,7 @@ func TestBlobService_Get(t *testing.T) { originalDataWidth := len(h.DAH.RowRoots) / 2 sizes := []int{blobSize0, blobSize1} for i, proof := range proofs { - require.True(t, sizes[i]/originalDataWidth+1 == proof.Len()) + require.True(t, sizes[i]/originalDataWidth+1 == len(proof.SubtreeRootProofs)) } }, }, @@ -397,12 +408,11 @@ func TestBlobService_Get(t *testing.T) { var proof Proof require.NoError(t, json.Unmarshal(jsonData, &proof)) - newProof, err := service.GetProof(ctx, 1, - blobsWithDiffNamespaces[1].Namespace(), - blobsWithDiffNamespaces[1].Commitment, - ) + header, err := service.headerGetter(ctx, 1) + require.NoError(t, err) + valid, err := proof.Verify(header.DataHash) require.NoError(t, err) - require.NoError(t, proof.equal(*newProof)) + require.True(t, valid) }, }, { @@ -441,6 +451,26 @@ func TestBlobService_Get(t *testing.T) { assert.Len(t, blobs, len(blobsWithSameNamespace)) }, }, + { + name: "get blob internal error", + doFn: func() (interface{}, error) { + ctrl := gomock.NewController(t) + shareGetterMock := mock.NewMockGetter(ctrl) + shareGetterMock.EXPECT(). + GetEDS(gomock.Any(), gomock.Any()). + DoAndReturn( + func(context.Context, *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + return nil, errors.New("internal error") + }).AnyTimes() + + service.shareGetter = shareGetterMock + return service.GetProof(ctx, 1, blobsWithDiffNamespaces[0].Namespace(), blobsWithDiffNamespaces[0].Commitment) + }, + expectedResult: func(res interface{}, err error) { + assert.Error(t, err) + assert.Contains(t, err.Error(), "internal error") + }, + }, } for _, tt := range test { @@ -838,19 +868,35 @@ func BenchmarkGetByCommitment(b *testing.B) { } } -func createServiceWithSub(ctx context.Context, t testing.TB, blobs []*Blob) *Service { +func createServiceWithSub(ctx context.Context, t *testing.T, blobs []*Blob) *Service { + acc := "test" + config := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
+ keyring := testfactory.TestKeyring(config.Codec, acc) + account := user.NewAccount(acc, 0, 0) + signer, err := user.NewSigner(keyring, config.TxConfig, testfactory.ChainID, appconsts.LatestVersion, account) + require.NoError(t, err) + bs := ipld.NewMemBlockservice() batching := ds_sync.MutexWrap(ds.NewMapDatastore()) headerStore, err := store.NewStore[*header.ExtendedHeader](batching) require.NoError(t, err) edsses := make([]*rsmt2d.ExtendedDataSquare, len(blobs)) + for i, blob := range blobs { - rawShares, err := BlobsToShares(blob) require.NoError(t, err) - eds, err := ipld.AddShares(ctx, rawShares, bs) + coreTx := edstest.BuildCoreTx(t, signer, acc, blob.Blob) + dataSquare, err := libsquare.Construct( + coretypes.Txs{coreTx}.ToSliceOfBytes(), + appconsts.SquareSizeUpperBound(appconsts.LatestVersion), + appconsts.SubtreeRootThreshold(appconsts.LatestVersion), + ) + require.NoError(t, err) + + eds, err := ipld.AddShares(ctx, dataSquare, bs) require.NoError(t, err) edsses[i] = eds } + headers := headertest.ExtendedHeadersFromEdsses(t, edsses) err = headerStore.Init(ctx, headers[0]) @@ -861,7 +907,6 @@ func createServiceWithSub(ctx context.Context, t testing.TB, blobs []*Blob) *Ser fn := func(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { return headers[height-1], nil - // return headerStore.GetByHeight(ctx, height) } fn2 := func(ctx context.Context) (<-chan *header.ExtendedHeader, error) { headerChan := make(chan *header.ExtendedHeader, len(headers)) @@ -907,6 +952,10 @@ func createService(ctx context.Context, t testing.TB, shares []libshare.Share) * s, err := accessor.Sample(ctx, row, col) return s.Share, err }) + shareGetter.EXPECT().GetEDS(gomock.Any(), gomock.Any()).AnyTimes(). + DoAndReturn(func(context.Context, *header.ExtendedHeader) (*rsmt2d.ExtendedDataSquare, error) { + return square, nil + }) // create header and put it into the store h := headertest.ExtendedHeaderFromEDS(t, 1, square) @@ -957,25 +1006,17 @@ func proveAndVerifyShareCommitments(t *testing.T, blobSize int) { blobShares, err := BlobsToShares(blb) require.NoError(t, err) // compute the commitment - actualCommitmentProof, err := ProveCommitment(eds, nss[msgIndex], blobShares) + actualCommitmentProof, err := proveCommitment(eds, nss[msgIndex], blobs[msgIndex], blobShares) require.NoError(t, err) // make sure the actual commitment attests to the data - require.NoError(t, actualCommitmentProof.Validate()) - valid, err := actualCommitmentProof.Verify( - dataRoot, - appconsts.DefaultSubtreeRootThreshold, - ) + valid, err := actualCommitmentProof.Verify(dataRoot) require.NoError(t, err) require.True(t, valid) // generate an expected proof and verify it's valid - expectedCommitmentProof := generateCommitmentProofFromBlock(t, eds, nss[msgIndex], blobs[msgIndex], dataRoot) - require.NoError(t, expectedCommitmentProof.Validate()) - valid, err = expectedCommitmentProof.Verify( - dataRoot, - appconsts.DefaultSubtreeRootThreshold, - ) + expectedCommitmentProof := generateProofFromBlock(t, eds, nss[msgIndex], blobs[msgIndex], dataRoot) + valid, err = expectedCommitmentProof.Verify(dataRoot) require.NoError(t, err) require.True(t, valid) @@ -989,15 +1030,15 @@ func proveAndVerifyShareCommitments(t *testing.T, blobSize int) { } } -// generateCommitmentProofFromBlock takes a block and a PFB index and generates the commitment proof +// generateProofFromBlock takes a block and a PFB index and generates the commitment proof // using the traditional way of doing, instead of using the API. 
-func generateCommitmentProofFromBlock( +func generateProofFromBlock( t *testing.T, eds *rsmt2d.ExtendedDataSquare, ns libshare.Namespace, blob *libshare.Blob, dataRoot []byte, -) CommitmentProof { +) Proof { // create the blob from the data blb, err := NewBlob(blob.ShareVersion(), ns, @@ -1030,38 +1071,13 @@ func generateCommitmentProofFromBlock( require.NoError(t, sharesProof.Validate(dataRoot)) // calculate the subtree roots - subtreeRoots := make([][]byte, 0) - dataCursor := 0 - for _, proof := range sharesProof.ShareProofs { - ranges, err := nmt.ToLeafRanges( - int(proof.Start), - int(proof.End), - inclusion.SubTreeWidth(len(blobShares), subtreeRootThreshold), - ) - require.NoError(t, err) - roots, err := computeSubtreeRoots( - blobShares[dataCursor:int32(dataCursor)+proof.End-proof.Start], - ranges, - int(proof.Start), - ) - require.NoError(t, err) - subtreeRoots = append(subtreeRoots, roots...) - dataCursor += int(proof.End - proof.Start) - } - - // convert the nmt proof to be accepted by the commitment proof - nmtProofs := make([]*nmt.Proof, 0) - for _, proof := range sharesProof.ShareProofs { - nmtProof := nmt.NewInclusionProof(int(proof.Start), int(proof.End), proof.Nodes, true) - nmtProofs = append(nmtProofs, &nmtProof) - } + subtreeRoots, err := blb.ComputeSubtreeRoots() + require.NoError(t, err) - commitmentProof := CommitmentProof{ - SubtreeRoots: subtreeRoots, - SubtreeRootProofs: nmtProofs, - NamespaceID: sharesProof.NamespaceId, - RowProof: *sharesProof.RowProof, - NamespaceVersion: uint8(sharesProof.NamespaceVersion), + commitmentProof := Proof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: toNMTProof(sharesProof.ShareProofs), + RowToDataRootProof: toCoreRowProof(sharesProof.RowProof), } return commitmentProof @@ -1076,3 +1092,220 @@ func delimLen(size uint64) int { lenBuf := make([]byte, binary.MaxVarintLen64) return binary.PutUvarint(lenBuf, size) } + +func TestBlobVerify(t *testing.T) { + _, blobs, nss, eds, _, _, dataRoot := edstest.GenerateTestBlock(t, 200, 10) + + // create the blob from the data + blob, err := NewBlob( + blobs[5].ShareVersion(), + nss[5], + blobs[5].Data(), + blobs[5].Signer(), + ) + require.NoError(t, err) + + // convert the blob to a number of shares + blobShares, err := BlobsToShares(blob) + require.NoError(t, err) + + // find the first share of the blob in the ODS + startShareIndex := -1 + for i, sh := range eds.FlattenedODS() { + if bytes.Equal(sh, blobShares[0].ToBytes()) { + startShareIndex = i + break + } + } + require.Greater(t, startShareIndex, 0) + + // create an inclusion proof of the blob using the share range instead of the commitment + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + nss[5], + libshare.NewRange(startShareIndex, startShareIndex+len(blobShares)), + ) + require.NoError(t, err) + require.NoError(t, sharesProof.Validate(dataRoot)) + + subtreeRoots, err := blob.ComputeSubtreeRoots() + require.NoError(t, err) + coreRowProof := toCoreRowProof(sharesProof.RowProof) + blobProof := Proof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: toNMTProof(sharesProof.ShareProofs), + RowToDataRootProof: coreRowProof, + } + tests := []struct { + name string + blob Blob + proof Proof + dataRoot []byte + expectErr bool + }{ + { + name: "invalid blob commitment", + dataRoot: dataRoot, + proof: blobProof, + blob: func() Blob { + b := *blob + b.Commitment = []byte{0x1} + return b + }(), + expectErr: true, + }, + { + name: "invalid row proof", + dataRoot: dataRoot, + proof: func() Proof { + p := blobProof + 
p.RowToDataRootProof.StartRow = 10 + p.RowToDataRootProof.EndRow = 15 + return p + }(), + blob: *blob, + expectErr: true, + }, + { + name: "malformed blob and proof", + dataRoot: dataRoot, + proof: func() Proof { + inclusionProof := nmt.NewInclusionProof(1, 3, [][]byte{{0x01}}, true) + return Proof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: []*nmt.Proof{&inclusionProof}, + RowToDataRootProof: blobProof.RowToDataRootProof, + } + }(), + blob: func() Blob { + b := *blob + b.Commitment = []byte{0x1} + return b + }(), + expectErr: true, + }, + { + name: "mismatched number of share proofs and row proofs", + dataRoot: dataRoot, + proof: func() Proof { + p := blobProof + invalidProof := nmt.NewInclusionProof( + blobProof.SubtreeRootProofs[0].Start(), + 15, + blobProof.SubtreeRootProofs[0].Nodes(), + true, + ) + p.SubtreeRootProofs[0] = &invalidProof + return p + }(), + blob: *blob, + expectErr: true, + }, + { + name: "invalid data root", + dataRoot: []byte{0x1, 0x2}, + proof: blobProof, + blob: *blob, + expectErr: true, + }, + { + name: "valid proof", + dataRoot: dataRoot, + blob: *blob, + proof: func() Proof { + return Proof{ + SubtreeRootProofs: toNMTProof(sharesProof.ShareProofs), + RowToDataRootProof: coreRowProof, + SubtreeRoots: subtreeRoots, + } + }(), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + valid, err := test.proof.Verify(test.dataRoot) + if test.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.True(t, valid) + } + }) + } +} + +func toCoreRowProof(proof *pkgproof.RowProof) coretypes.RowProof { + tmRowRoots := make([]bytes2.HexBytes, 0, len(proof.RowRoots)) + tmRowProofs := make([]*merkle.Proof, 0, len(proof.RowRoots)) + for index, root := range proof.RowRoots { + tmRowRoots = append(tmRowRoots, root) + tmRowProofs = append(tmRowProofs, &merkle.Proof{ + Total: proof.Proofs[index].Total, + Index: proof.Proofs[index].Index, + LeafHash: proof.Proofs[index].LeafHash, + Aunts: proof.Proofs[index].Aunts, + }) + } + return coretypes.RowProof{ + RowRoots: tmRowRoots, + Proofs: tmRowProofs, + StartRow: proof.StartRow, + EndRow: proof.EndRow, + } +} + +func proveCommitment( + eds *rsmt2d.ExtendedDataSquare, + namespace libshare.Namespace, + blb *libshare.Blob, + blobShares []libshare.Share, +) (*Proof, error) { + // find the blob shares in the EDS + blobSharesStartIndex := -1 + for index, share := range eds.FlattenedODS() { + if bytes.Equal(share, blobShares[0].ToBytes()) { + blobSharesStartIndex = index + } + } + if blobSharesStartIndex < 0 { + return nil, fmt.Errorf("couldn't find the blob shares in the ODS") + } + + sharesProof, err := pkgproof.NewShareInclusionProofFromEDS( + eds, + namespace, + libshare.NewRange(blobSharesStartIndex, blobSharesStartIndex+len(blobShares)), + ) + if err != nil { + return nil, err + } + + // convert the shares to row root proofs to nmt proofs + nmtProofs := make([]*nmt.Proof, 0) + for _, proof := range sharesProof.ShareProofs { + nmtProof := nmt.NewInclusionProof( + int(proof.Start), + int(proof.End), + proof.Nodes, + true, + ) + nmtProofs = append( + nmtProofs, + &nmtProof, + ) + } + + // compute the subtree roots of the blob shares + subtreeRoots, err := inclusion.GenerateSubtreeRoots(blb, appconsts.DefaultSubtreeRootThreshold) + if err != nil { + return nil, err + } + + commitmentProof := Proof{ + SubtreeRoots: subtreeRoots, + SubtreeRootProofs: nmtProofs, + RowToDataRootProof: toCoreRowProof(sharesProof.RowProof), + } + return &commitmentProof, nil +} diff --git a/go.mod 
b/go.mod index d61ee15167..ad3a39e968 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,6 @@ require ( github.com/celestiaorg/go-fraud v0.2.1 github.com/celestiaorg/go-header v0.6.3 github.com/celestiaorg/go-libp2p-messenger v0.2.0 - github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 github.com/celestiaorg/go-square/v2 v2.1.0-rc0 github.com/celestiaorg/nmt v0.22.2 github.com/celestiaorg/rsmt2d v0.14.0 diff --git a/go.sum b/go.sum index 7a1f140965..d93279e0d3 100644 --- a/go.sum +++ b/go.sum @@ -359,8 +359,6 @@ github.com/celestiaorg/go-libp2p-messenger v0.2.0 h1:/0MuPDcFamQMbw9xTZ73yImqgTO github.com/celestiaorg/go-libp2p-messenger v0.2.0/go.mod h1:s9PIhMi7ApOauIsfBcQwbr7m+HBzmVfDIS+QLdgzDSo= github.com/celestiaorg/go-square v1.1.1 h1:Cy3p8WVspVcyOqHM8BWFuuYPwMitO1pYGe+ImILFZRA= github.com/celestiaorg/go-square v1.1.1/go.mod h1:1EXMErhDrWJM8B8V9hN7dqJ2kUTClfwdqMOmF9yQUa0= -github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= -github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= github.com/celestiaorg/go-square/v2 v2.1.0-rc0 h1:Ra6bp+mVUXmUT1KYMiOCmW4tBK6EIknpP10uhPngOR8= github.com/celestiaorg/go-square/v2 v2.1.0-rc0/go.mod h1:n3ztrh8CBjWOD6iWYMo3pPOlQIgzLK9yrnqMPcNo6g8= github.com/celestiaorg/merkletree v0.0.0-20230308153949-c33506a7aa26 h1:P2RI1xJ49EZ8cuHMcH+ZSBonfRDtBS8OS9Jdt1BWX3k= diff --git a/header/header.go b/header/header.go index b44447ce42..122b46558f 100644 --- a/header/header.go +++ b/header/header.go @@ -230,7 +230,7 @@ func (eh *ExtendedHeader) UnmarshalBinary(data []byte) error { // Uses tendermint encoder for tendermint compatibility. func (eh *ExtendedHeader) MarshalJSON() ([]byte, error) { // alias the type to avoid going into recursion loop - // because tmjson.Marshal invokes custom json marshalling + // because tmjson.Marshal invokes custom json marshaling type Alias ExtendedHeader return tmjson.Marshal((*Alias)(eh)) } diff --git a/nodebuilder/blob/blob.go b/nodebuilder/blob/blob.go index b8069f82b5..7c41fe33b9 100644 --- a/nodebuilder/blob/blob.go +++ b/nodebuilder/blob/blob.go @@ -35,13 +35,6 @@ type Module interface { // Included checks whether a blob's given commitment(Merkle subtree root) is included at // given height and under the namespace. Included(_ context.Context, height uint64, _ libshare.Namespace, _ *blob.Proof, _ blob.Commitment) (bool, error) - // GetCommitmentProof generates a commitment proof for a share commitment. - GetCommitmentProof( - ctx context.Context, - height uint64, - namespace libshare.Namespace, - shareCommitment []byte, - ) (*blob.CommitmentProof, error) // Subscribe to published blobs from the given namespace as they are included. 
 	Subscribe(_ context.Context, _ libshare.Namespace) (<-chan *blob.SubscriptionResponse, error)
 }
@@ -77,12 +70,6 @@ type API struct {
 			*blob.Proof,
 			blob.Commitment,
 		) (bool, error) `perm:"read"`
-		GetCommitmentProof func(
-			ctx context.Context,
-			height uint64,
-			namespace libshare.Namespace,
-			shareCommitment []byte,
-		) (*blob.CommitmentProof, error) `perm:"read"`
 		Subscribe func(
 			context.Context,
 			libshare.Namespace,
@@ -116,15 +103,6 @@ func (api *API) GetProof(
 	return api.Internal.GetProof(ctx, height, namespace, commitment)
 }

-func (api *API) GetCommitmentProof(
-	ctx context.Context,
-	height uint64,
-	namespace libshare.Namespace,
-	shareCommitment []byte,
-) (*blob.CommitmentProof, error) {
-	return api.Internal.GetCommitmentProof(ctx, height, namespace, shareCommitment)
-}
-
 func (api *API) Included(
 	ctx context.Context,
 	height uint64,
diff --git a/nodebuilder/blob/mocks/api.go b/nodebuilder/blob/mocks/api.go
index a7c1d7d909..e567d8f17f 100644
--- a/nodebuilder/blob/mocks/api.go
+++ b/nodebuilder/blob/mocks/api.go
@@ -67,21 +67,6 @@ func (mr *MockModuleMockRecorder) GetAll(arg0, arg1, arg2 interface{}) *gomock.C
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockModule)(nil).GetAll), arg0, arg1, arg2)
 }

-// GetCommitmentProof mocks base method.
-func (m *MockModule) GetCommitmentProof(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 []byte) (*blob.CommitmentProof, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetCommitmentProof", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*blob.CommitmentProof)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// GetCommitmentProof indicates an expected call of GetCommitmentProof.
-func (mr *MockModuleMockRecorder) GetCommitmentProof(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommitmentProof", reflect.TypeOf((*MockModule)(nil).GetCommitmentProof), arg0, arg1, arg2, arg3)
-}
-
 // GetProof mocks base method.
 func (m *MockModule) GetProof(arg0 context.Context, arg1 uint64, arg2 share.Namespace, arg3 blob.Commitment) (*blob.Proof, error) {
 	m.ctrl.T.Helper()
diff --git a/nodebuilder/share/mocks/api.go b/nodebuilder/share/mocks/api.go
index cccc81a452..51b1a8eef2 100644
--- a/nodebuilder/share/mocks/api.go
+++ b/nodebuilder/share/mocks/api.go
@@ -54,10 +54,10 @@ func (mr *MockModuleMockRecorder) GetEDS(arg0, arg1 interface{}) *gomock.Call {
 }

 // GetRange mocks base method.
-func (m *MockModule) GetRange(arg0 context.Context, arg1 uint64, arg2, arg3 int) (*share.GetRangeResult, error) {
+func (m *MockModule) GetRange(arg0 context.Context, arg1 uint64, arg2, arg3 int) (*share.RangeResult, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "GetRange", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*share.GetRangeResult)
+	ret0, _ := ret[0].(*share.RangeResult)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
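With GetCommitmentProof removed from the blob module above, inclusion checks go through the remaining GetProof/Included pair. The sketch below is not part of the diff; it is a minimal illustration against the nodebuilder blob Module interface, and the exact GetProof parameter order is assumed from the API wrapper shown above.

package example

import (
	"context"

	"github.com/celestiaorg/celestia-node/blob"
	nodeblob "github.com/celestiaorg/celestia-node/nodebuilder/blob"
	libshare "github.com/celestiaorg/go-square/v2/share"
)

// checkBlobInclusion is an illustrative helper, not part of this change set:
// it asks the node for the proof of a commitment and then checks inclusion.
// mod can be any nodebuilder blob.Module implementation, e.g. the RPC client.
func checkBlobInclusion(
	ctx context.Context,
	mod nodeblob.Module,
	height uint64,
	ns libshare.Namespace,
	commitment blob.Commitment,
) (bool, error) {
	proof, err := mod.GetProof(ctx, height, ns, commitment)
	if err != nil {
		return false, err
	}
	return mod.Included(ctx, height, ns, proof, commitment)
}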
diff --git a/nodebuilder/share/share.go b/nodebuilder/share/share.go
index 8a3efcc757..7805d3783e 100644
--- a/nodebuilder/share/share.go
+++ b/nodebuilder/share/share.go
@@ -1,7 +1,10 @@
 package share

 import (
+	"bytes"
 	"context"
+	"errors"
+	"fmt"

 	"github.com/tendermint/tendermint/types"

@@ -16,11 +19,30 @@ import (

 var _ Module = (*API)(nil)

-// GetRangeResult wraps the return value of the GetRange endpoint
-// because Json-RPC doesn't support more than two return values.
-type GetRangeResult struct {
-	Shares []libshare.Share
-	Proof  *types.ShareProof
+// RangeResult wraps the return value of the GetRange endpoint.
+// It contains a set of shares along with their proof to
+// the data root.
+type RangeResult struct {
+	// Shares are the queried shares.
+	Shares []libshare.Share `json:"shares"`
+	// Proof is the proof of Shares up to the data root.
+	Proof *types.ShareProof `json:"proof"`
+}
+
+// Verify checks whether the shares and proof in the range
+// are committed to by the provided data root.
+// Returns nil if the proof is valid and a sensible error otherwise.
+func (r RangeResult) Verify(dataRoot []byte) error {
+	if len(dataRoot) == 0 {
+		return errors.New("root must be non-empty")
+	}
+
+	for index, data := range r.Shares {
+		if !bytes.Equal(data.ToBytes(), r.Proof.Data[index]) {
+			return fmt.Errorf("mismatching share %d between the range result and the proof", index)
+		}
+	}
+	return r.Proof.Validate(dataRoot)
 }

 // Module provides access to any data square or block share on the network.
@@ -53,7 +75,7 @@
 		ctx context.Context, height uint64, namespace libshare.Namespace,
 	) (shwap.NamespaceData, error)
 	// GetRange gets a list of shares and their corresponding proof.
-	GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error)
+	GetRange(ctx context.Context, height uint64, start, end int) (*RangeResult, error)
 }

 // API is a wrapper around Module for the RPC.
@@ -78,7 +100,7 @@ type API struct {
 			ctx context.Context,
 			height uint64,
 			start, end int,
-		) (*GetRangeResult, error) `perm:"read"`
+		) (*RangeResult, error) `perm:"read"`
 	}
 }

@@ -94,7 +116,7 @@ func (api *API) GetEDS(ctx context.Context, height uint64) (*rsmt2d.ExtendedData
 	return api.Internal.GetEDS(ctx, height)
 }

-func (api *API) GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) {
+func (api *API) GetRange(ctx context.Context, height uint64, start, end int) (*RangeResult, error) {
 	return api.Internal.GetRange(ctx, height, start, end)
 }

@@ -136,7 +158,7 @@ func (m module) SharesAvailable(ctx context.Context, height uint64) error {
 	return m.avail.SharesAvailable(ctx, header)
 }

-func (m module) GetRange(ctx context.Context, height uint64, start, end int) (*GetRangeResult, error) {
+func (m module) GetRange(ctx context.Context, height uint64, start, end int) (*RangeResult, error) {
 	extendedDataSquare, err := m.GetEDS(ctx, height)
 	if err != nil {
 		return nil, err
@@ -151,7 +173,7 @@ func (m module) GetRange(ctx context.Context, height uint64, start, end int) (*G
 	if err != nil {
 		return nil, err
 	}
-	return &GetRangeResult{
+	return &RangeResult{
 		Shares: shares,
 		Proof:  proof,
 	}, nil
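A short usage sketch for the new RangeResult.Verify, not part of the diff: it assumes access to a nodebuilder share.Module (e.g. via the RPC client) and a trusted data root, typically the DataHash of an already-validated header at the same height. The helper name and error wrapping are illustrative only.

package example

import (
	"context"
	"fmt"

	share "github.com/celestiaorg/celestia-node/nodebuilder/share"
)

// getVerifiedRange is an illustrative helper: it fetches a range of shares
// and only returns it if the proof checks out against the given data root.
func getVerifiedRange(
	ctx context.Context,
	mod share.Module,
	height uint64,
	start, end int,
	dataRoot []byte,
) (*share.RangeResult, error) {
	rng, err := mod.GetRange(ctx, height, start, end)
	if err != nil {
		return nil, err
	}
	if err := rng.Verify(dataRoot); err != nil {
		return nil, fmt.Errorf("range proof verification failed: %w", err)
	}
	return rng, nil
}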
diff --git a/share/eds/edstest/testing.go b/share/eds/edstest/testing.go
index a022087978..d589dfe1c7 100644
--- a/share/eds/edstest/testing.go
+++ b/share/eds/edstest/testing.go
@@ -191,7 +191,20 @@ func createTestBlobTransaction(
 	ns := libshare.RandomBlobNamespace()
 	account := signer.Account(accountName)
 	msg, b := blobfactory.RandMsgPayForBlobsWithNamespaceAndSigner(account.Address().String(), ns, size)
+	cTx := BuildCoreTx(t, signer, accountName, b)
+	return ns, msg, b, cTx
+}
+
+// BuildCoreTx takes a signer, an account name, and a blob and creates a core transaction.
+// The core transaction is the final form of a transaction that gets pushed
+// into the square builder.
+func BuildCoreTx(
+	t *testing.T,
+	signer *user.Signer,
+	accountName string,
+	b *libshare.Blob,
+) coretypes.Tx {
 	cTx, _, err := signer.CreatePayForBlobs(accountName, []*libshare.Blob{b})
 	require.NoError(t, err)
-	return ns, msg, b, cTx
+	return cTx
 }