diff --git a/.changelog/config.toml b/.changelog/config.toml new file mode 100644 index 0000000000..de0fee50c2 --- /dev/null +++ b/.changelog/config.toml @@ -0,0 +1 @@ +project_url = 'https://github.com/cometbft/cometbft' diff --git a/.changelog/epilogue.md b/.changelog/epilogue.md new file mode 100644 index 0000000000..1e68f6b728 --- /dev/null +++ b/.changelog/epilogue.md @@ -0,0 +1,14 @@ +--- + +CometBFT is a fork of [Tendermint +Core](https://github.com/tendermint/tendermint) as of late December 2022. + +## Bug bounty + +Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos). + +## Previous changes + +For changes released before the creation of CometBFT, please refer to the +Tendermint Core +[CHANGELOG.md](https://github.com/tendermint/tendermint/blob/a9feb1c023e172b542c972605311af83b777855b/CHANGELOG.md). diff --git a/.changelog/unreleased/.gitkeep b/.changelog/unreleased/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.changelog/v0.34.27/breaking-changes/152-rename-binary-docker.md b/.changelog/v0.34.27/breaking-changes/152-rename-binary-docker.md new file mode 100644 index 0000000000..3870f96f92 --- /dev/null +++ b/.changelog/v0.34.27/breaking-changes/152-rename-binary-docker.md @@ -0,0 +1,2 @@ +- Rename binary to `cometbft` and Docker image to `cometbft/cometbft` + ([\#152](https://github.com/cometbft/cometbft/pull/152)) diff --git a/.changelog/v0.34.27/breaking-changes/211-deprecate-tmhome.md b/.changelog/v0.34.27/breaking-changes/211-deprecate-tmhome.md new file mode 100644 index 0000000000..d2bded0f27 --- /dev/null +++ b/.changelog/v0.34.27/breaking-changes/211-deprecate-tmhome.md @@ -0,0 +1,3 @@ +- The `TMHOME` environment variable was renamed to `CMTHOME`, and all + environment variables starting with `TM_` are instead prefixed with `CMT_` + ([\#211](https://github.com/cometbft/cometbft/issues/211)) diff --git a/.changelog/v0.34.27/breaking-changes/360-update-to-go-119.md 
b/.changelog/v0.34.27/breaking-changes/360-update-to-go-119.md new file mode 100644 index 0000000000..97fafda93b --- /dev/null +++ b/.changelog/v0.34.27/breaking-changes/360-update-to-go-119.md @@ -0,0 +1,2 @@ +- Use Go 1.19 to build CometBFT, since Go 1.18 has reached end-of-life. + ([\#360](https://github.com/cometbft/cometbft/issues/360)) diff --git a/.changelog/v0.34.27/bug-fixes/383-txindexer-fix-slash-parsing.md b/.changelog/v0.34.27/bug-fixes/383-txindexer-fix-slash-parsing.md new file mode 100644 index 0000000000..c08824da9d --- /dev/null +++ b/.changelog/v0.34.27/bug-fixes/383-txindexer-fix-slash-parsing.md @@ -0,0 +1,3 @@ +- `[state/kvindexer]` Resolved crashes when event values contained slashes, + introduced after adding event sequences. + (\#[383](https://github.com/cometbft/cometbft/pull/383): @jmalicevic) diff --git a/.changelog/v0.34.27/bug-fixes/386-quick-fix-needproofblock.md b/.changelog/v0.34.27/bug-fixes/386-quick-fix-needproofblock.md new file mode 100644 index 0000000000..d3d2f5b738 --- /dev/null +++ b/.changelog/v0.34.27/bug-fixes/386-quick-fix-needproofblock.md @@ -0,0 +1,6 @@ +- `[consensus]` Short-term fix for the case when `needProofBlock` cannot find + previous block meta by defaulting to the creation of a new proof block. + ([\#386](https://github.com/cometbft/cometbft/pull/386): @adizere) + - Special thanks to the [Vega.xyz](https://vega.xyz/) team, and in particular + to Zohar (@ze97286), for reporting the problem and working with us to get to + a fix. diff --git a/.changelog/v0.34.27/bug-fixes/4-busy-loop-send-block-part.md b/.changelog/v0.34.27/bug-fixes/4-busy-loop-send-block-part.md new file mode 100644 index 0000000000..414ec44cb1 --- /dev/null +++ b/.changelog/v0.34.27/bug-fixes/4-busy-loop-send-block-part.md @@ -0,0 +1,3 @@ +- `[consensus]` Fixed a busy loop that happened when sending of a block part + failed by sleeping in case of error. 
+ ([\#4](https://github.com/informalsystems/tendermint/pull/4)) diff --git a/.changelog/v0.34.27/bug-fixes/9936-p2p-fix-envelope-sending.md b/.changelog/v0.34.27/bug-fixes/9936-p2p-fix-envelope-sending.md new file mode 100644 index 0000000000..fd38b79b9f --- /dev/null +++ b/.changelog/v0.34.27/bug-fixes/9936-p2p-fix-envelope-sending.md @@ -0,0 +1,5 @@ +- `[p2p]` Correctly use non-blocking `TrySendEnvelope` method when attempting to + send messages, as opposed to the blocking `SendEnvelope` method. It is unclear + whether this has a meaningful impact on P2P performance, but this patch does + correct the underlying behaviour to what it should be + ([tendermint/tendermint\#9936](https://github.com/tendermint/tendermint/pull/9936)) diff --git a/.changelog/v0.34.27/dependencies/160-tmdb-to-cometbftdb.md b/.changelog/v0.34.27/dependencies/160-tmdb-to-cometbftdb.md new file mode 100644 index 0000000000..e4c1351312 --- /dev/null +++ b/.changelog/v0.34.27/dependencies/160-tmdb-to-cometbftdb.md @@ -0,0 +1,3 @@ +- Replace [tm-db](https://github.com/tendermint/tm-db) with + [cometbft-db](https://github.com/cometbft/cometbft-db) + ([\#160](https://github.com/cometbft/cometbft/pull/160)) \ No newline at end of file diff --git a/.changelog/v0.34.27/dependencies/165-bump-tmloadtest.md b/.changelog/v0.34.27/dependencies/165-bump-tmloadtest.md new file mode 100644 index 0000000000..175163ac00 --- /dev/null +++ b/.changelog/v0.34.27/dependencies/165-bump-tmloadtest.md @@ -0,0 +1,2 @@ +- Bump tm-load-test to v1.3.0 to remove implicit dependency on Tendermint Core + ([\#165](https://github.com/cometbft/cometbft/pull/165)) \ No newline at end of file diff --git a/.changelog/v0.34.27/dependencies/9787-btcec-dep-update.md b/.changelog/v0.34.27/dependencies/9787-btcec-dep-update.md new file mode 100644 index 0000000000..d155748e0c --- /dev/null +++ b/.changelog/v0.34.27/dependencies/9787-btcec-dep-update.md @@ -0,0 +1,3 @@ +- `[crypto]` Update to use btcec v2 and the latest btcutil + 
([tendermint/tendermint\#9787](https://github.com/tendermint/tendermint/pull/9787): + @wcsiu) diff --git a/.changelog/v0.34.27/features/9759-kvindexer-match-event.md b/.changelog/v0.34.27/features/9759-kvindexer-match-event.md new file mode 100644 index 0000000000..281f6cd1fb --- /dev/null +++ b/.changelog/v0.34.27/features/9759-kvindexer-match-event.md @@ -0,0 +1,3 @@ +- `[rpc]` Add `match_event` query parameter to indicate to the RPC that it + should match events _within_ attributes, not only within a height + ([tendermint/tendermint\#9759](https://github.com/tendermint/tendermint/pull/9759)) diff --git a/.changelog/v0.34.27/improvements/136-remove-tm-signer-harness.md b/.changelog/v0.34.27/improvements/136-remove-tm-signer-harness.md new file mode 100644 index 0000000000..6eb6c2158c --- /dev/null +++ b/.changelog/v0.34.27/improvements/136-remove-tm-signer-harness.md @@ -0,0 +1,2 @@ +- `[tools/tm-signer-harness]` Remove the folder as it is unused + ([\#136](https://github.com/cometbft/cometbft/issues/136)) \ No newline at end of file diff --git a/.changelog/v0.34.27/improvements/204-version-commit-hash.md b/.changelog/v0.34.27/improvements/204-version-commit-hash.md new file mode 100644 index 0000000000..675a1a2924 --- /dev/null +++ b/.changelog/v0.34.27/improvements/204-version-commit-hash.md @@ -0,0 +1,2 @@ +- Append the commit hash to the version of CometBFT being built + ([\#204](https://github.com/cometbft/cometbft/pull/204)) \ No newline at end of file diff --git a/.changelog/v0.34.27/improvements/314-prio-mempool-badtxlog.md b/.changelog/v0.34.27/improvements/314-prio-mempool-badtxlog.md new file mode 100644 index 0000000000..ba4ac031e2 --- /dev/null +++ b/.changelog/v0.34.27/improvements/314-prio-mempool-badtxlog.md @@ -0,0 +1,3 @@ +- `[mempool/v1]` Suppress "rejected bad transaction" in priority mempool logs by + reducing log level from info to debug + ([\#314](https://github.com/cometbft/cometbft/pull/314): @JayT106) diff --git 
a/.changelog/v0.34.27/improvements/56-rpc-cache-rpc-responses.md b/.changelog/v0.34.27/improvements/56-rpc-cache-rpc-responses.md new file mode 100644 index 0000000000..344b3df93b --- /dev/null +++ b/.changelog/v0.34.27/improvements/56-rpc-cache-rpc-responses.md @@ -0,0 +1,2 @@ +- `[e2e]` Add functionality for uncoordinated (minor) upgrades + ([\#56](https://github.com/tendermint/tendermint/pull/56)) \ No newline at end of file diff --git a/.changelog/v0.34.27/improvements/9733-consensus-metrics.md b/.changelog/v0.34.27/improvements/9733-consensus-metrics.md new file mode 100644 index 0000000000..77d8c743ec --- /dev/null +++ b/.changelog/v0.34.27/improvements/9733-consensus-metrics.md @@ -0,0 +1,4 @@ +- `[consensus]` Add `consensus_block_gossip_parts_received` and + `consensus_step_duration_seconds` metrics in order to aid in investigating the + impact of database compaction on consensus performance + ([tendermint/tendermint\#9733](https://github.com/tendermint/tendermint/pull/9733)) diff --git a/.changelog/v0.34.27/improvements/9759-kvindexer-match-event.md b/.changelog/v0.34.27/improvements/9759-kvindexer-match-event.md new file mode 100644 index 0000000000..8b5757cb8e --- /dev/null +++ b/.changelog/v0.34.27/improvements/9759-kvindexer-match-event.md @@ -0,0 +1,3 @@ +- `[state/kvindexer]` Add `match.event` keyword to support condition evaluation + based on the event the attributes belong to + ([tendermint/tendermint\#9759](https://github.com/tendermint/tendermint/pull/9759)) diff --git a/.changelog/v0.34.27/improvements/9764-p2p-fix-logspam.md b/.changelog/v0.34.27/improvements/9764-p2p-fix-logspam.md new file mode 100644 index 0000000000..78fa6844fe --- /dev/null +++ b/.changelog/v0.34.27/improvements/9764-p2p-fix-logspam.md @@ -0,0 +1,4 @@ +- `[p2p]` Reduce log spam through reducing log level of "Dialing peer" and + "Added peer" messages from info to debug + ([tendermint/tendermint\#9764](https://github.com/tendermint/tendermint/pull/9764): + @faddat) diff 
--git a/.changelog/v0.34.27/improvements/9776-consensus-vote-bandwidth.md b/.changelog/v0.34.27/improvements/9776-consensus-vote-bandwidth.md new file mode 100644 index 0000000000..2bfdd05acf --- /dev/null +++ b/.changelog/v0.34.27/improvements/9776-consensus-vote-bandwidth.md @@ -0,0 +1,3 @@ +- `[consensus]` Reduce bandwidth consumption of consensus votes by roughly 50% + through fixing a small logic bug + ([tendermint/tendermint\#9776](https://github.com/tendermint/tendermint/pull/9776)) diff --git a/.changelog/v0.34.27/summary.md b/.changelog/v0.34.27/summary.md new file mode 100644 index 0000000000..e4a13db501 --- /dev/null +++ b/.changelog/v0.34.27/summary.md @@ -0,0 +1,17 @@ +*Feb 27, 2023* + +This is the first official release of CometBFT - a fork of [Tendermint +Core](https://github.com/tendermint/tendermint). This particular release is +intended to be compatible with the Tendermint Core v0.34 release series. + +For details as to how to upgrade to CometBFT from Tendermint Core, please see +our [upgrading guidelines](./UPGRADING.md). + +If you have any questions, comments, concerns or feedback on this release, we +would love to hear from you! Please contact us via [GitHub +Discussions](https://github.com/cometbft/cometbft/discussions), +[Discord](https://discord.gg/cosmosnetwork) (in the `#cometbft` channel) or +[Telegram](https://t.me/CometBFT). + +Special thanks to @wcsiu, @ze97286, @faddat and @JayT106 for their contributions +to this release! diff --git a/.changelog/v0.34.28/breaking-changes/558-tm10011.md b/.changelog/v0.34.28/breaking-changes/558-tm10011.md new file mode 100644 index 0000000000..d1b9fca4ab --- /dev/null +++ b/.changelog/v0.34.28/breaking-changes/558-tm10011.md @@ -0,0 +1,2 @@ +- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root). 
`Proof.ComputeRootHash` now panics when it encounters an error, but `Proof.Verify` does not panic + ([\#558](https://github.com/cometbft/cometbft/issues/558)) diff --git a/.changelog/v0.34.28/bug-fixes/496-error-on-applyblock-should-panic.md b/.changelog/v0.34.28/bug-fixes/496-error-on-applyblock-should-panic.md new file mode 100644 index 0000000000..55e9c874f8 --- /dev/null +++ b/.changelog/v0.34.28/bug-fixes/496-error-on-applyblock-should-panic.md @@ -0,0 +1,2 @@ +- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error by a `panic`. + ([\#496](https://github.com/cometbft/cometbft/pull/496)) \ No newline at end of file diff --git a/.changelog/v0.34.28/bug-fixes/524-rename-peerstate-tojson.md b/.changelog/v0.34.28/bug-fixes/524-rename-peerstate-tojson.md new file mode 100644 index 0000000000..b9a43b3ce4 --- /dev/null +++ b/.changelog/v0.34.28/bug-fixes/524-rename-peerstate-tojson.md @@ -0,0 +1,2 @@ +- `[consensus]` Rename `(*PeerState).ToJSON` to `MarshalJSON` to fix a logging data race + ([\#524](https://github.com/cometbft/cometbft/pull/524)) diff --git a/.changelog/v0.34.28/bug-fixes/575-fix-light-client-panic.md b/.changelog/v0.34.28/bug-fixes/575-fix-light-client-panic.md new file mode 100644 index 0000000000..0ec55b923f --- /dev/null +++ b/.changelog/v0.34.28/bug-fixes/575-fix-light-client-panic.md @@ -0,0 +1,6 @@ +- `[light]` Fixed an edge case where a light client would panic when attempting + to query a node that (1) has started from a non-zero height and (2) does + not yet have any data. 
The light client will now, correctly, not panic + _and_ keep the node in its list of providers in the same way it would if + it queried a node starting from height zero that does not yet have data + ([\#575](https://github.com/cometbft/cometbft/issues/575)) \ No newline at end of file diff --git a/.changelog/v0.34.28/improvements/475-upgrade-go-schnorrkel.md b/.changelog/v0.34.28/improvements/475-upgrade-go-schnorrkel.md new file mode 100644 index 0000000000..bdaf96c14c --- /dev/null +++ b/.changelog/v0.34.28/improvements/475-upgrade-go-schnorrkel.md @@ -0,0 +1 @@ +- `[crypto/sr25519]` Upgrade to go-schnorrkel@v1.0.0 ([\#475](https://github.com/cometbft/cometbft/issues/475)) diff --git a/.changelog/v0.34.28/improvements/638-json-rpc-error-message.md b/.changelog/v0.34.28/improvements/638-json-rpc-error-message.md new file mode 100644 index 0000000000..6922091fd2 --- /dev/null +++ b/.changelog/v0.34.28/improvements/638-json-rpc-error-message.md @@ -0,0 +1,3 @@ +- `[jsonrpc/client]` Improve the error message for client errors stemming from + bad HTTP responses. + ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638)) diff --git a/.changelog/v0.34.28/summary.md b/.changelog/v0.34.28/summary.md new file mode 100644 index 0000000000..ba3efa9d79 --- /dev/null +++ b/.changelog/v0.34.28/summary.md @@ -0,0 +1,6 @@ +*April 26, 2023* + +This release fixes several bugs, and has had to introduce one small Go +API-breaking change in the `crypto/merkle` package in order to address what +could be a security issue for some users who directly and explicitly make use of +that code. 
diff --git a/.github/issue_template.md b/.github/issue_template.md new file mode 100644 index 0000000000..4514e4abf0 --- /dev/null +++ b/.github/issue_template.md @@ -0,0 +1,10 @@ +--- +labels: needs-triage +--- + + diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index de1746317c..ba206e4964 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -41,7 +41,7 @@ jobs: check-proto: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: '1.19' diff --git a/.github/workflows/docker.yml b/.github/workflows/cometbft-docker.yml similarity index 68% rename from .github/workflows/docker.yml rename to .github/workflows/cometbft-docker.yml index 68682574d3..f4cf3f6f10 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -5,8 +5,10 @@ on: branches: - v[0-9]+.[0-9]+.x-celestia tags: - - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 - - "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5 + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 + - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10 + - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10 + - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. 
v0.37.0-rc1, v0.38.0-rc10 jobs: build: @@ -16,7 +18,7 @@ jobs: - name: Prepare id: prep run: | - DOCKER_IMAGE=tendermint/tendermint + DOCKER_IMAGE=cometbft/cometbft VERSION=noop if [[ $GITHUB_REF == refs/tags/* ]]; then VERSION=${GITHUB_REF#refs/tags/} @@ -30,15 +32,15 @@ jobs: if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}" fi - echo ::set-output name=tags::${TAGS} + echo "tags=${TAGS}" >> $GITHUB_OUTPUT - name: Set up QEMU uses: docker/setup-qemu-action@master with: platforms: all - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + - name: Set up Docker Build + uses: docker/setup-buildx-action@v2.5.0 - name: Build but do not Publish to Docker Hub uses: docker/build-push-action@v3 @@ -46,4 +48,4 @@ jobs: context: . file: ./DOCKER/Dockerfile platforms: linux/amd64,linux/arm64 - tags: ${{ steps.prep.outputs.tags }} \ No newline at end of file + tags: ${{ steps.prep.outputs.tags }} diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ab808ebf6e..8e009b138a 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -43,7 +43,7 @@ jobs: env: GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.19" - uses: actions/checkout@v3 @@ -65,7 +65,7 @@ jobs: matrix: part: ["00", "01", "02", "03"] steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.19" - uses: actions/checkout@v3 diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index 74cd779478..bca2861687 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: '1.19' @@ -28,7 +28,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the 
matrix groups above - run: ./generator-multiversion.sh -g 4 -d networks/nightly/ + run: ./build/generator -g 4 -d networks/nightly -p - name: Run ${{ matrix.p2p }} p2p testnets working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 464c5bf40f..4845e97c94 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -21,7 +21,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: '1.19' @@ -37,7 +37,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly + run: ./build/generator -g 4 -d networks/nightly -p - name: Run testnets in group ${{ matrix.group }} working-directory: test/e2e diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index db61d244f6..2cb134aa90 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 15 steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: '1.19' - uses: actions/checkout@v3 diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 4bc565c199..c2fcda24c7 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -9,7 +9,7 @@ jobs: fuzz-nightly-test: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: '1.19' diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 7cf4d6f7c4..f2993a1157 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -10,21 +10,23 @@ on: branches: - v[0-9]+.[0-9]+.x-celestia -jobs: - govulncheck: - runs-on: ubuntu-latest - steps: - - uses: actions/setup-go@v3 - with: - go-version: "1.19" - - uses: 
actions/checkout@v3 - - uses: technote-space/get-diff-action@v6 - with: - PATTERNS: | - **/*.go - go.mod - go.sum - Makefile - - name: govulncheck - run: make vulncheck - if: "env.GIT_DIFF != ''" +# TODO: re-enable after figuring out what needs to get fixed or if this is +# handled upstream in main +# jobs: +# govulncheck: +# runs-on: ubuntu-latest +# steps: +# - uses: actions/setup-go@v3 +# with: +# go-version: "1.19" +# - uses: actions/checkout@v3 +# - uses: technote-space/get-diff-action@v6 +# with: +# PATTERNS: | +# **/*.go +# go.mod +# go.sum +# Makefile +# - name: govulncheck +# run: make vulncheck +# if: "env.GIT_DIFF != ''" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ae0ab0d553..33c46137d2 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -19,7 +19,8 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/setup-go@v3 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 with: go-version: '1.19' - uses: technote-space/get-diff-action@v6 diff --git a/.github/workflows/markdown-linter.yml b/.github/workflows/markdown-linter.yml new file mode 100644 index 0000000000..bdbd7f2c33 --- /dev/null +++ b/.github/workflows/markdown-linter.yml @@ -0,0 +1,33 @@ +name: Markdown Linter +on: + push: + branches: + - v0.34.x + paths: + - "**.md" + - "**.yml" + - "**.yaml" + pull_request: + branches: [v0.34.x] + paths: + - "**.md" + - "**.yml" + +jobs: + build: + name: Super linter + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v3 + - name: Lint Code Base + uses: docker://github/super-linter:v4 + env: + VALIDATE_ALL_CODEBASE: true + DEFAULT_BRANCH: v0.34.x + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + VALIDATE_MD: true + VALIDATE_OPENAPI: true + VALIDATE_YAML: true + YAML_CONFIG_FILE: yaml-lint.yml + FILTER_REGEX_EXCLUDE: "/workspace/tools/mintnet-kubernetes/*.yaml | /workspace/tools/mintnet-kubernetes/examples/*.yaml | 
workspace/tools/mintnet-kubernetes/assets/*.yaml | /workspace/tools/mintnet-kubernetes/examples/dummy/*.yaml | /workspace/tools/mintnet-kubernetes/examples/counter/*.yaml" diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml new file mode 100644 index 0000000000..2a9872bb9d --- /dev/null +++ b/.github/workflows/pre-release.yml @@ -0,0 +1,77 @@ +name: "Pre-release" + +on: + push: + tags: + - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10 + - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10 + - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10 + +jobs: + prerelease: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v4 + with: + go-version: '1.19' + + # Similar check to ./release-version.yml, but enforces this when pushing + # tags. The ./release-version.yml check can be bypassed and is mainly + # present for informational purposes. + - name: Check release version + run: | + # We strip the refs/tags/v prefix of the tag name. + TAG_VERSION=${GITHUB_REF#refs/tags/v} + # Get the version of the code, which has no "v" prefix. + CODE_VERSION=`go run ./cmd/cometbft/ version` + if [ "$TAG_VERSION" != "$CODE_VERSION" ]; then + echo "" + echo "Tag version ${TAG_VERSION} does not match code version ${CODE_VERSION}" + echo "" + echo "Please either fix the release tag or the version of the software in version/version.go." + exit 1 + fi + + - name: Generate release notes + run: | + VERSION="${GITHUB_REF#refs/tags/}" + CHANGELOG_URL="https://github.com/cometbft/cometbft/blob/${VERSION}/CHANGELOG.md" + echo "See the [CHANGELOG](${CHANGELOG_URL}) for changes available in this pre-release, but not yet officially released." 
> ../release_notes.md + + - name: Release + uses: goreleaser/goreleaser-action@v4 + with: + version: latest + args: release --clean --release-notes ../release_notes.md + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + prerelease-success: + needs: prerelease + if: ${{ success() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack upon pre-release + uses: slackapi/slack-github-action@v1.23.0 + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}" + with: + payload: | + { + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":sparkles: New CometBFT pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>" + } + } + ] + } diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index fd842c402c..7579d8efb5 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.14.0 + - uses: bufbuild/buf-setup-action@v1.17.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/release-version.yml b/.github/workflows/release-version.yml new file mode 100644 index 0000000000..034d191f21 --- /dev/null +++ b/.github/workflows/release-version.yml @@ -0,0 +1,33 @@ +# Checks that, if we're working on a release branch and are about to cut a +# release, we have set the version correctly. +name: Check release version + +on: + push: + branches: + - 'release/**' + +jobs: + check-version: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-go@v4 + with: + go-version: '1.19' + + - name: Check version + run: | + # We strip the refs/heads/release/v prefix of the branch name. 
+ BRANCH_VERSION=${GITHUB_REF#refs/heads/release/v} + # Get the version of the code, which has no "v" prefix. + CODE_VERSION=`go run ./cmd/cometbft/ version` + if [ "$BRANCH_VERSION" != "$CODE_VERSION" ]; then + echo "" + echo "Branch version ${BRANCH_VERSION} does not match code version ${CODE_VERSION}" + echo "" + echo "Please either fix the release branch naming (which must have a 'release/v' prefix)" + echo "or the version of the software in version/version.go." + exit 1 + fi diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4e28c9af3e..f3a9dd4e93 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,7 +14,7 @@ jobs: with: fetch-depth: 0 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: '1.19' diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 345ce716a8..396f41b1ab 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v7 + - uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull request has been automatically marked as stale because it has not had diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml new file mode 100644 index 0000000000..4a7efb141e --- /dev/null +++ b/.github/workflows/testapp-docker.yml @@ -0,0 +1,61 @@ +name: Docker E2E Node +# Build & Push rebuilds the e2e Testapp docker image on every push to main and creation of tags +# and pushes the image to https://hub.docker.com/r/cometbft/e2e-node +on: + push: + branches: + - main + - v0.34.x + tags: + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 + - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10 + - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10 + - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. 
v0.37.0-rc1, v0.38.0-rc10 + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Prepare + id: prep + run: | + DOCKER_IMAGE=cometbft/e2e-node + VERSION=noop + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') + if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then + VERSION=latest + fi + fi + TAGS="${DOCKER_IMAGE}:${VERSION}" + if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then + TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}" + fi + echo "tags=${TAGS}" >> $GITHUB_OUTPUT + + - name: Set up QEMU + uses: docker/setup-qemu-action@master + with: + platforms: all + + - name: Set up Docker Build + uses: docker/setup-buildx-action@v2.5.0 + + - name: Login to DockerHub + if: ${{ github.event_name != 'pull_request' }} + uses: docker/login-action@v2.1.0 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Publish to Docker Hub + uses: docker/build-push-action@v4.0.0 + with: + context: . 
+ file: ./test/e2e/docker/Dockerfile + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'beep_boop' }} + tags: ${{ steps.prep.outputs.tags }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2912f205b3..64bb85e744 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 5 steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.19" - uses: actions/checkout@v3 @@ -55,7 +55,7 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.19" - uses: actions/checkout@v3 @@ -87,7 +87,7 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.19" - uses: actions/checkout@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md index 5d31708d36..b89bd25153 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,39 @@ # CHANGELOG +## v0.34.28 + +*April 26, 2023* + +This release fixes several bugs, and has had to introduce one small Go +API-breaking change in the `crypto/merkle` package in order to address what +could be a security issue for some users who directly and explicitly make use of +that code. + +### BREAKING CHANGES + +- `[crypto/merkle]` Do not allow verification of Merkle Proofs against empty trees (`nil` root). `Proof.ComputeRootHash` now panics when it encounters an error, but `Proof.Verify` does not panic + ([\#558](https://github.com/cometbft/cometbft/issues/558)) + +### BUG FIXES + +- `[consensus]` Unexpected error conditions in `ApplyBlock` are non-recoverable, so ignoring the error and carrying on is a bug. We replaced a `return` that disregarded the error by a `panic`. 
+ ([\#496](https://github.com/cometbft/cometbft/pull/496)) +- `[consensus]` Rename `(*PeerState).ToJSON` to `MarshalJSON` to fix a logging data race + ([\#524](https://github.com/cometbft/cometbft/pull/524)) +- `[light]` Fixed an edge case where a light client would panic when attempting + to query a node that (1) has started from a non-zero height and (2) does + not yet have any data. The light client will now, correctly, not panic + _and_ keep the node in its list of providers in the same way it would if + it queried a node starting from height zero that does not yet have data + ([\#575](https://github.com/cometbft/cometbft/issues/575)) + +### IMPROVEMENTS + +- `[crypto/sr25519]` Upgrade to go-schnorrkel@v1.0.0 ([\#475](https://github.com/cometbft/cometbft/issues/475)) +- `[jsonrpc/client]` Improve the error message for client errors stemming from + bad HTTP responses. + ([cometbft/cometbft\#638](https://github.com/cometbft/cometbft/pull/638)) + ## v0.34.27 *Feb 27, 2023* diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md deleted file mode 100644 index d358ebe390..0000000000 --- a/CHANGELOG_PENDING.md +++ /dev/null @@ -1,25 +0,0 @@ -# Unreleased Changes - -## v0.34.24 - -### BREAKING CHANGES - -- CLI/RPC/Config - -- Apps - -- P2P Protocol - -- Go API - -- Blockchain Protocol - -### FEATURES - -- [#9083] backport cli command to reindex missed events (@cmwaters) - -### IMPROVEMENTS - -- [consensus] \#9760 Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. 
(@williambanfield) - -### BUG FIXES diff --git a/Makefile b/Makefile index 276b3a327c..8c47694ae5 100644 --- a/Makefile +++ b/Makefile @@ -108,7 +108,7 @@ ifeq (linux/riscv64,$(findstring linux/riscv64,$(TARGETPLATFORM))) GOARCH=riscv64 endif -all: build test install +all: check build test install .PHONY: all include tests.mk diff --git a/README.md b/README.md index 7860fd387b..2506c57e97 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ This repo intends on preserving the minimal possible diff with [cometbft/cometbf - **specific to Celestia**: consider if [celestia-app](https://github.com/celestiaorg/celestia-app) is a better target - **not specific to Celestia**: consider making the contribution upstream in CometBFT -1. [Install Go](https://go.dev/doc/install) 1.17+ +1. [Install Go](https://go.dev/doc/install) 1.19+ 2. Fork this repo 3. Clone your fork 4. Find an issue to work on (see [good first issues](https://github.com/celestiaorg/celestia-core/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)) diff --git a/UPGRADING.md b/UPGRADING.md index 40a9a1ddec..7cccd3ba1f 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -2,6 +2,14 @@ This guide provides instructions for upgrading to specific versions of CometBFT. +## v0.34.28 + +For users explicitly making use of the Go APIs provided in the `crypto/merkle` +package, please note that, in order to fix a potential security issue, we had to +make a breaking change here. This change should only affect a small minority of +users. For more details, please see +[\#557](https://github.com/cometbft/cometbft/issues/557). + ## v0.34.27 This is the first official release of CometBFT, forked originally from @@ -40,7 +48,7 @@ subsequent major release of CometBFT. ### Building CometBFT -CometBFT must be compiled using Go 1.19 or higher. The use of Go 1.18 is not +CometBFT must be compiled using Go 1.19 or higher. 
The use of Go 1.18 is not supported, since this version has reached end-of-life with the release of [Go 1.20][go120]. ### Troubleshooting diff --git a/cmd/cometbft/commands/compact.go b/cmd/cometbft/commands/compact.go index 05db2968cb..e1c22d5116 100644 --- a/cmd/cometbft/commands/compact.go +++ b/cmd/cometbft/commands/compact.go @@ -14,11 +14,12 @@ import ( ) var CompactGoLevelDBCmd = &cobra.Command{ - Use: "experimental-compact-goleveldb", - Short: "force compacts the CometBFT storage engine (only GoLevelDB supported)", + Use: "experimental-compact-goleveldb", + Aliases: []string{"experimental_compact_goleveldb"}, + Short: "force compacts the CometBFT storage engine (only GoLevelDB supported)", Long: ` -This is a temporary utility command that performs a force compaction on the state -and blockstores to reduce disk space for a pruning node. This should only be run +This is a temporary utility command that performs a force compaction on the state +and blockstores to reduce disk space for a pruning node. This should only be run once the node has stopped. This command will likely be omitted in the future after the planned refactor to the storage engine. 
diff --git a/cmd/cometbft/commands/gen_node_key.go b/cmd/cometbft/commands/gen_node_key.go index 4993e147a9..41842be113 100644 --- a/cmd/cometbft/commands/gen_node_key.go +++ b/cmd/cometbft/commands/gen_node_key.go @@ -15,7 +15,6 @@ var GenNodeKeyCmd = &cobra.Command{ Use: "gen-node-key", Aliases: []string{"gen_node_key"}, Short: "Generate a node key for this node and print its ID", - PreRun: deprecateSnakeCase, RunE: genNodeKey, } diff --git a/cmd/cometbft/commands/gen_validator.go b/cmd/cometbft/commands/gen_validator.go index 11539efe3a..e9266c885d 100644 --- a/cmd/cometbft/commands/gen_validator.go +++ b/cmd/cometbft/commands/gen_validator.go @@ -15,7 +15,6 @@ var GenValidatorCmd = &cobra.Command{ Use: "gen-validator", Aliases: []string{"gen_validator"}, Short: "Generate new validator keypair", - PreRun: deprecateSnakeCase, Run: genValidator, } diff --git a/cmd/cometbft/commands/probe_upnp.go b/cmd/cometbft/commands/probe_upnp.go index 17b310bc0d..6d0f6d582f 100644 --- a/cmd/cometbft/commands/probe_upnp.go +++ b/cmd/cometbft/commands/probe_upnp.go @@ -15,7 +15,6 @@ var ProbeUpnpCmd = &cobra.Command{ Aliases: []string{"probe_upnp"}, Short: "Test UPnP functionality", RunE: probeUpnp, - PreRun: deprecateSnakeCase, } func probeUpnp(cmd *cobra.Command, args []string) error { diff --git a/cmd/cometbft/commands/reindex_event.go b/cmd/cometbft/commands/reindex_event.go index 97a3b84164..cfbb4e4c2c 100644 --- a/cmd/cometbft/commands/reindex_event.go +++ b/cmd/cometbft/commands/reindex_event.go @@ -31,13 +31,14 @@ var ( // ReIndexEventCmd constructs a command to re-index events in a block height interval. var ReIndexEventCmd = &cobra.Command{ - Use: "reindex-event", - Short: "Re-index events to the event store backends", + Use: "reindex-event", + Aliases: []string{"reindex_event"}, + Short: "Re-index events to the event store backends", Long: ` reindex-event is an offline tooling to re-index block and tx events to the eventsinks. 
You can run this command when the event store backend dropped/disconnected or you want to -replace the backend. The default start-height is 0, meaning the tooling will start -reindex from the base block height(inclusive); and the default end-height is 0, meaning +replace the backend. The default start-height is 0, meaning the tooling will start +reindex from the base block height(inclusive); and the default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omit either or both arguments. diff --git a/cmd/cometbft/commands/replay.go b/cmd/cometbft/commands/replay.go index 5de8d0d3e8..28b6f0218f 100644 --- a/cmd/cometbft/commands/replay.go +++ b/cmd/cometbft/commands/replay.go @@ -24,5 +24,4 @@ var ReplayConsoleCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) }, - PreRun: deprecateSnakeCase, } diff --git a/cmd/cometbft/commands/reset.go b/cmd/cometbft/commands/reset.go index bfedf20a9a..0f1e5bbe21 100644 --- a/cmd/cometbft/commands/reset.go +++ b/cmd/cometbft/commands/reset.go @@ -18,16 +18,15 @@ var ResetAllCmd = &cobra.Command{ Aliases: []string{"unsafe_reset_all"}, Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", RunE: resetAllCmd, - PreRun: deprecateSnakeCase, } var keepAddrBook bool // ResetStateCmd removes the database of the specified CometBFT core instance. 
var ResetStateCmd = &cobra.Command{ - Use: "reset-state", - Short: "Remove all the data and WAL", - PreRun: deprecateSnakeCase, + Use: "reset-state", + Aliases: []string{"reset_state"}, + Short: "Remove all the data and WAL", RunE: func(cmd *cobra.Command, args []string) (err error) { config, err = ParseConfig(cmd) if err != nil { @@ -47,7 +46,6 @@ var ResetPrivValidatorCmd = &cobra.Command{ Use: "unsafe-reset-priv-validator", Aliases: []string{"unsafe_reset_priv_validator"}, Short: "(unsafe) Reset this node's validator to genesis state", - PreRun: deprecateSnakeCase, RunE: resetPrivValidator, } diff --git a/cmd/cometbft/commands/root.go b/cmd/cometbft/commands/root.go index b112dd1bc9..45dc934238 100644 --- a/cmd/cometbft/commands/root.go +++ b/cmd/cometbft/commands/root.go @@ -3,7 +3,6 @@ package commands import ( "fmt" "os" - "strings" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -40,7 +39,7 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { if os.Getenv("CMTHOME") != "" { home = os.Getenv("CMTHOME") } else if os.Getenv("TMHOME") != "" { - //XXX: Deprecated. + // XXX: Deprecated. home = os.Getenv("TMHOME") logger.Error("Deprecated environment variable TMHOME identified. CMTHOME should be used instead.") } else { @@ -91,10 +90,3 @@ var RootCmd = &cobra.Command{ return nil }, } - -// deprecateSnakeCase is a util function for 0.34.1. 
Should be removed in 0.35 -func deprecateSnakeCase(cmd *cobra.Command, args []string) { - if strings.Contains(cmd.CalledAs(), "_") { - fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release") - } -} diff --git a/cmd/cometbft/commands/show_node_id.go b/cmd/cometbft/commands/show_node_id.go index 26c0974902..a5d8a7f4f2 100644 --- a/cmd/cometbft/commands/show_node_id.go +++ b/cmd/cometbft/commands/show_node_id.go @@ -14,7 +14,6 @@ var ShowNodeIDCmd = &cobra.Command{ Aliases: []string{"show_node_id"}, Short: "Show this node's ID", RunE: showNodeID, - PreRun: deprecateSnakeCase, } func showNodeID(cmd *cobra.Command, args []string) error { diff --git a/cmd/cometbft/commands/show_validator.go b/cmd/cometbft/commands/show_validator.go index 83e0101d70..51dd8cc860 100644 --- a/cmd/cometbft/commands/show_validator.go +++ b/cmd/cometbft/commands/show_validator.go @@ -16,7 +16,6 @@ var ShowValidatorCmd = &cobra.Command{ Aliases: []string{"show_validator"}, Short: "Show this node's validator info", RunE: showValidator, - PreRun: deprecateSnakeCase, } func showValidator(cmd *cobra.Command, args []string) error { diff --git a/config/toml.go b/config/toml.go index 8dfd117cd1..7c50608903 100644 --- a/config/toml.go +++ b/config/toml.go @@ -541,7 +541,7 @@ max_open_connections = {{ .Instrumentation.MaxOpenConnections }} # Instrumentation namespace namespace = "{{ .Instrumentation.Namespace }}" -# The URL of the influxdb instance to use for remote event +# The URL of the influxdb instance to use for remote event # collection. If empty, remote event collection is disabled. 
influx_url = "{{ .Instrumentation.InfluxURL }}" diff --git a/consensus/reactor.go b/consensus/reactor.go index 9051436183..aa93f5ac9d 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -108,14 +108,19 @@ func (conR *Reactor) OnStop() { func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { conR.Logger.Info("SwitchToConsensus") - // We have no votes, so reconstruct LastCommit from SeenCommit. - if state.LastBlockHeight > 0 { - conR.conS.reconstructLastCommit(state) - } + func() { + // We need to lock, as we are not entering consensus state from State's `handleMsg` or `handleTimeout` + conR.conS.mtx.Lock() + defer conR.conS.mtx.Unlock() + // We have no votes, so reconstruct LastCommit from SeenCommit + if state.LastBlockHeight > 0 { + conR.conS.reconstructLastCommit(state) + } - // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a - // NewRoundStepMessage. - conR.conS.updateToState(state) + // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a + // NewRoundStepMessage. + conR.conS.updateToState(state) + }() conR.mtx.Lock() conR.waitSync = false @@ -1079,8 +1084,8 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState { return &prs } -// ToJSON returns a json of PeerState. -func (ps *PeerState) ToJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaler interface. 
+func (ps *PeerState) MarshalJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() diff --git a/consensus/state.go b/consensus/state.go index 7a2ce7eb32..b9e90e2103 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1688,8 +1688,7 @@ func (cs *State) finalizeCommit(height int64) { seenCommit, ) if err != nil { - logger.Error("failed to apply block", "err", err) - return + panic(fmt.Sprintf("failed to apply block; error %v", err)) } fail.Fail() // XXX diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index e3dd0f98af..1084bdf7c1 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -50,25 +50,40 @@ func ProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*Proof) { // Verify that the Proof proves the root hash. // Check sp.Index/sp.Total manually if needed func (sp *Proof) Verify(rootHash []byte, leaf []byte) error { - leafHash := leafHash(leaf) + if rootHash == nil { + return fmt.Errorf("invalid root hash: cannot be nil") + } if sp.Total < 0 { return errors.New("proof total must be positive") } if sp.Index < 0 { return errors.New("proof index cannot be negative") } + leafHash := leafHash(leaf) if !bytes.Equal(sp.LeafHash, leafHash) { return fmt.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) } - computedHash := sp.ComputeRootHash() + computedHash, err := sp.computeRootHash() + if err != nil { + return fmt.Errorf("compute root hash: %w", err) + } if !bytes.Equal(computedHash, rootHash) { return fmt.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash) } return nil } -// Compute the root hash given a leaf hash. Does not verify the result. +// Compute the root hash given a leaf hash. Panics in case of errors. func (sp *Proof) ComputeRootHash() []byte { + computedHash, err := sp.computeRootHash() + if err != nil { + panic(fmt.Errorf("ComputeRootHash errored %w", err)) + } + return computedHash +} + +// Compute the root hash given a leaf hash. 
+func (sp *Proof) computeRootHash() ([]byte, error) { return computeHashFromAunts( sp.Index, sp.Total, @@ -148,35 +163,36 @@ func ProofFromProto(pb *cmtcrypto.Proof) (*Proof, error) { // Use the leafHash and innerHashes to get the root merkle hash. // If the length of the innerHashes slice isn't exactly correct, the result is nil. // Recursive impl. -func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) []byte { +func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) ([]byte, error) { if index >= total || index < 0 || total <= 0 { - return nil + return nil, fmt.Errorf("invalid index %d and/or total %d", index, total) } switch total { case 0: panic("Cannot call computeHashFromAunts() with 0 total") case 1: if len(innerHashes) != 0 { - return nil + return nil, fmt.Errorf("unexpected inner hashes") } - return leafHash + return leafHash, nil default: if len(innerHashes) == 0 { - return nil + return nil, fmt.Errorf("expected at least one inner hash") } numLeft := getSplitPoint(total) if index < numLeft { - leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if leftHash == nil { - return nil + leftHash, err := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if err != nil { + return nil, err } - return innerHash(leftHash, innerHashes[len(innerHashes)-1]) + + return innerHash(leftHash, innerHashes[len(innerHashes)-1]), nil } - rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if rightHash == nil { - return nil + rightHash, err := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if err != nil { + return nil, err } - return innerHash(innerHashes[len(innerHashes)-1], rightHash) + return innerHash(innerHashes[len(innerHashes)-1], rightHash), nil } } diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index 
ad9e483749..52eda9b225 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -1,6 +1,7 @@ package merkle import ( + "bytes" "errors" "fmt" "testing" @@ -8,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/tmhash" cmtcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) @@ -262,3 +264,26 @@ func TestVoteProtobuf(t *testing.T) { } } } + +// TestVsa2022_100 verifies https://blog.verichains.io/p/vsa-2022-100-tendermint-forging-membership-proof +func TestVsa2022_100(t *testing.T) { + // a fake key-value pair and its hash + key := []byte{0x13} + value := []byte{0x37} + vhash := tmhash.Sum(value) + bz := new(bytes.Buffer) + _ = encodeByteSlice(bz, key) + _ = encodeByteSlice(bz, vhash) + kvhash := tmhash.Sum(append([]byte{0}, bz.Bytes()...)) + + // the malicious `op` + op := NewValueOp( + key, + &Proof{LeafHash: kvhash}, + ) + + // the nil root + var root []byte + + assert.NotNil(t, ProofOperators{op}.Verify(root, "/"+string(key), [][]byte{value})) +} diff --git a/crypto/merkle/proof_value.go b/crypto/merkle/proof_value.go index bae6849c6c..9648d076b5 100644 --- a/crypto/merkle/proof_value.go +++ b/crypto/merkle/proof_value.go @@ -93,8 +93,12 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) { return nil, fmt.Errorf("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) } + rootHash, err := op.Proof.computeRootHash() + if err != nil { + return nil, err + } return [][]byte{ - op.Proof.ComputeRootHash(), + rootHash, }, nil } diff --git a/crypto/sr25519/pubkey.go b/crypto/sr25519/pubkey.go index 87805cacba..7fc954fc98 100644 --- a/crypto/sr25519/pubkey.go +++ b/crypto/sr25519/pubkey.go @@ -55,7 +55,8 @@ func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool { return false } - return publicKey.Verify(signature, signingContext) + ok, err := publicKey.Verify(signature, signingContext) + return ok && err == nil } func (pubKey 
PubKey) String() string { diff --git a/docs/README.md b/docs/README.md index 66d7d8bb4a..cc36e176f2 100644 --- a/docs/README.md +++ b/docs/README.md @@ -16,13 +16,12 @@ CometBFT serves blockchain applications. More formally, CometBFT performs Byzantine Fault Tolerant (BFT) State Machine Replication (SMR) for arbitrary deterministic, finite state machines. -For more background, see [What is CometBFT?](introduction/what-is-cometbft.md). +For more background, see [What is CometBFT?](introduction/README.md#what-is-cometbft). -To get started quickly with an example application, see the -[quick start guide](introduction/quick-start.md). +To get started quickly with an example application, see the [quick start guide](guides/quick-start.md). To upgrade from Tendermint Core v0.34.x to CometBFT v0.34.x, please see our -[upgrading instructions](./introduction/upgrading-from-tm.md). +[upgrading instructions](./guides/upgrading-from-tm.md). To learn about application development on CometBFT, see the [Application Blockchain Interface](https://github.com/cometbft/cometbft/tree/v0.34.x/spec/abci). diff --git a/docs/core/configuration.md b/docs/core/configuration.md index 7f595d643f..3c2cffdbe1 100644 --- a/docs/core/configuration.md +++ b/docs/core/configuration.md @@ -17,6 +17,7 @@ like the file below, however, double check by inspecting the `config.toml` created with your version of `cometbft` installed: ```toml + # This is a TOML config file. # For more information, see https://github.com/toml-lang/toml @@ -66,7 +67,7 @@ db_backend = "goleveldb" db_dir = "data" # Output level for logging, including package level options -log_level = "main:info,state:info,statesync:info,*:error" +log_level = "info" # Output format: 'plain' (colored text) or 'json' log_format = "plain" @@ -155,6 +156,33 @@ max_subscription_clients = 100 # the estimated # maximum number of broadcast_tx_commit calls per block. 
max_subscriptions_per_client = 5 +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behaviour. +experimental_close_on_slow_client = false + # How long to wait for a tx to be committed during /broadcast_tx_commit. # WARNING: Using a value larger than 10s will result in increasing the # global HTTP write timeout, which applies to all connections and endpoints. @@ -178,7 +206,7 @@ tls_cert_file = "" # The path to a file containing matching private key that is used to create the HTTPS server. # Might be either absolute path or path related to CometBFT's config directory. -# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. 
+# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. # Otherwise, HTTP server is run. tls_key_file = "" @@ -196,7 +224,8 @@ laddr = "tcp://0.0.0.0:26656" # Address to advertise to peers for them to dial # If empty, will use the same port as the laddr, # and will introspect on the listener or use UPnP -# to figure out the address. +# to figure out the address. ip and port are required +# example: 159.89.10.97:26656 external_address = "" # Comma separated list of seed nodes to connect to @@ -259,10 +288,21 @@ handshake_timeout = "20s" dial_timeout = "3s" ####################################################### -### Mempool Configurattion Option ### +### Mempool Configuration Option ### ####################################################### [mempool] +# Mempool version to use: +# 1) "v0" - (default) FIFO mempool. +# 2) "v1" - prioritized mempool. +# 3) "v2" - CAT +version = "v2" + +# Recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. recheck = true broadcast = true wal_dir = "" @@ -290,7 +330,23 @@ max_tx_bytes = 1048576 # Maximum size of a batch of transactions to send to a peer # Including space needed by encoding (one varint per transaction). # XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 10485760 +max_batch_bytes = 0 + +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. 
+ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 ####################################################### ### State Sync Configuration Options ### @@ -312,12 +368,22 @@ enable = false rpc_servers = "" trust_height = 0 trust_hash = "" -trust_period = "0s" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" # Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). # Will create a new, randomly named directory within, and remove it when done. temp_dir = "" +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + ####################################################### ### Fast Sync Configuration Connections ### ####################################################### @@ -370,6 +436,17 @@ create_empty_blocks_interval = "0s" peer_gossip_sleep_duration = "100ms" peer_query_maj23_sleep_duration = "2s" +####################################################### +### Storage Configuration Options ### +####################################################### +[storage] + +# Set to true to discard ABCI responses from the state store, which can save a +# considerable amount of disk space. Set to false to ensure ABCI responses are +# persisted. ABCI responses are required for /block_results RPC queries, and to +# reindex events in the command-line tool. 
+discard_abci_responses = false + ####################################################### ### Transaction Indexer Configuration Options ### ####################################################### @@ -384,8 +461,14 @@ peer_query_maj23_sleep_duration = "2s" # 1) "null" # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). # - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. indexer = "kv" +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + ####################################################### ### Instrumentation Configuration Options ### ####################################################### @@ -407,16 +490,12 @@ max_open_connections = 3 # Instrumentation namespace namespace = "cometbft" - -``` + ``` ## Empty blocks VS no empty blocks - ### create_empty_blocks = true -If `create_empty_blocks` is set to `true` in your config, blocks will be -created ~ every second (with default consensus parameters). You can regulate -the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks. +If `create_empty_blocks` is set to `true` in your config, blocks will be created ~ every second (with default consensus parameters). You can regulate the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks. ### create_empty_blocks = false @@ -439,11 +518,11 @@ transactions every `create_empty_blocks_interval`. For instance, with CometBFT will only create blocks if there are transactions, or after waiting 30 seconds without receiving any transactions. 
-## Consensus timeouts explained +Plus, if you set `create_empty_blocks_interval` to something other than the default (`0`), CometBFT will be creating empty blocks even in the absence of transactions every `create_empty_blocks_interval.` For instance, with `create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`, CometBFT will only create blocks if there are transactions, or after waiting 30 seconds without receiving any transactions. +## Consensus timeouts explained There's a variety of information about timeouts in [Running in production](./running-in-production.md#configuration-parameters). - You can also find more detailed explanation in the paper describing the Tendermint consensus algorithm, adopted by CometBFT: [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938). diff --git a/docs/core/rpc.md b/docs/core/rpc.md index f7523a1180..e118d5a3a2 100644 --- a/docs/core/rpc.md +++ b/docs/core/rpc.md @@ -6,6 +6,4 @@ order: 9 The RPC documentation is hosted here: -- [RPC Documentation](https://docs.cometbft.com/v0.34/rpc) - -To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/cometbft/cometbft/blob/v0.34.x/rpc/core). +- [OpenAPI reference](../rpc) diff --git a/docs/core/running-in-production.md b/docs/core/running-in-production.md index 9998acb2d3..88ef6686c7 100644 --- a/docs/core/running-in-production.md +++ b/docs/core/running-in-production.md @@ -93,15 +93,37 @@ mechanisms. ### RPC +#### Attack Exposure and Mitigation + +**It is generally not recommended for RPC endpoints to be exposed publicly, and +especially so if the node in question is a validator**, as the CometBFT RPC does +not currently provide advanced security features. Public exposure of RPC +endpoints without appropriate protection can make the associated node vulnerable +to a variety of attacks. 
+ +It is entirely up to operators to ensure, if nodes' RPC endpoints have to be +exposed publicly, that appropriate measures have been taken to mitigate against +attacks. Some examples of mitigation measures include, but are not limited to: + +- Never publicly exposing the RPC endpoints of validators (i.e. if the RPC + endpoints absolutely have to be exposed, ensure you do so only on full nodes + and with appropriate protection) +- Correct usage of rate-limiting, authentication and caching (e.g. as provided + by reverse proxies like [nginx](https://nginx.org/) and/or DDoS protection + services like [Cloudflare](https://www.cloudflare.com)) +- Only exposing the specific endpoints absolutely necessary for the relevant use + cases (configurable via nginx/Cloudflare/etc.) + +If no expertise is available to the operator to assist with securing nodes' RPC +endpoints, it is strongly recommended to never expose those endpoints publicly. + +**Under no condition should any of the [unsafe RPC endpoints](../rpc/#/Unsafe) +ever be exposed publicly.** + +#### Endpoints Returning Multiple Entries + Endpoints returning multiple entries are limited by default to return 30 -elements (100 max). See the [RPC Documentation](https://docs.cometbft.com/v0.34/rpc/) -for more information. - -Rate-limiting and authentication are another key aspects to help protect -against DOS attacks. Validators are supposed to use external tools like -[NGINX](https://www.nginx.com/blog/rate-limiting-nginx/) or -[traefik](https://docs.traefik.io/middlewares/ratelimit/) -to achieve the same things. +elements (100 max). See the [RPC Documentation](../rpc/) for more information. 
## Debugging CometBFT diff --git a/docs/core/subscription.md b/docs/core/subscription.md index 5e038439db..3a5d60cd16 100644 --- a/docs/core/subscription.md +++ b/docs/core/subscription.md @@ -14,9 +14,11 @@ To connect to a node via websocket from the CLI, you can use a tool such as [wscat](https://github.com/websockets/wscat) and run: ```sh -wscat ws://127.0.0.1:26657/websocket +wscat -c ws://127.0.0.1:26657/websocket ``` +NOTE: If your node's RPC endpoint is TLS-enabled, utilize the scheme `wss` instead of `ws`. + You can subscribe to any of the events above by calling the `subscribe` RPC method via Websocket along with a valid query. diff --git a/docs/guides/README.md b/docs/guides/README.md new file mode 100644 index 0000000000..7c8b0a9e5b --- /dev/null +++ b/docs/guides/README.md @@ -0,0 +1,15 @@ +--- +order: false +parent: + order: 2 +--- + +# Guides + +- [Installing CometBFT](./install.md) +- [Quick-start using CometBFT](./quick-start.md) +- [Upgrading from Tendermint to CometBFT](./upgrading-from-tm.md) +- [Creating a built-in application in Go](./go-built-in.md) +- [Creating an external application in Go](./go.md) +- [Creating an external application in Java](./java.md) +- [Creating an external application in Kotlin](./kotlin.md) diff --git a/docs/tutorials/go-built-in.md b/docs/guides/go-built-in.md similarity index 99% rename from docs/tutorials/go-built-in.md rename to docs/guides/go-built-in.md index 086418b0cb..589f4b8733 100644 --- a/docs/tutorials/go-built-in.md +++ b/docs/guides/go-built-in.md @@ -1,5 +1,5 @@ --- -order: 2 +order: 3 --- # Creating a built-in application in Go @@ -86,7 +86,8 @@ CometBFT. ```bash go mod init kvstore -go get github.com/cometbft/cometbft@v0.34.27 +go get github.com/tendermint/tendermint +go mod edit -replace github.com/tendermint/tendermint=github.com/cometbft/cometbft@v0.34.28 ``` After running the above commands you will see two generated files, `go.mod` and `go.sum`. 
diff --git a/docs/tutorials/go.md b/docs/guides/go.md similarity index 99% rename from docs/tutorials/go.md rename to docs/guides/go.md index f1ee4f9b81..3bbe48d326 100644 --- a/docs/tutorials/go.md +++ b/docs/guides/go.md @@ -1,5 +1,5 @@ --- -order: 1 +order: 4 --- # Creating an application in Go diff --git a/docs/introduction/install.md b/docs/guides/install.md similarity index 99% rename from docs/introduction/install.md rename to docs/guides/install.md index 366c0c90a2..4b2d3166e0 100644 --- a/docs/introduction/install.md +++ b/docs/guides/install.md @@ -1,5 +1,5 @@ --- -order: 3 +order: 1 --- # Install CometBFT diff --git a/docs/introduction/quick-start.md b/docs/guides/quick-start.md similarity index 97% rename from docs/introduction/quick-start.md rename to docs/guides/quick-start.md index 06d2df0aee..21dcdc91d6 100644 --- a/docs/introduction/quick-start.md +++ b/docs/guides/quick-start.md @@ -11,7 +11,7 @@ works and want to get started right away, continue. ## Install -See the [install instructions](./install.md). +See the [install guide](./install.md). ## Initialization @@ -95,7 +95,7 @@ First create four Ubuntu cloud machines. The following was tested on Digital Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP addresses below as IP1, IP2, IP3, IP4. -Then, `ssh` into each machine and install CometBFT following the [instructions](./install.md). +Then, `ssh` into each machine and install CometBFT following the [guide](./install.md). Next, use the `cometbft testnet` command to create four directories of config files (found in `./mytestnet`) and copy each directory to the relevant machine in the cloud, so that each machine has `$HOME/mytestnet/node[0-3]` directory. 
diff --git a/docs/guides/upgrading-from-tm.md b/docs/guides/upgrading-from-tm.md new file mode 100644 index 0000000000..dc5036ce35 --- /dev/null +++ b/docs/guides/upgrading-from-tm.md @@ -0,0 +1,47 @@ +--- +order: 3 +--- + +# Upgrading from Tendermint Core + +CometBFT was originally forked from [Tendermint Core v0.34.24][v03424] and +subsequently updated in Informal Systems' public fork of Tendermint Core for +[v0.34.25][v03425] and [v0.34.26][v03426]. + +If you already make use of Tendermint Core (either the original Tendermint Core +v0.34.24, or Informal Systems' public fork), you can upgrade to CometBFT +v0.34.27 by replacing your dependency in your `go.mod` file: + +```bash +go mod edit -replace github.com/tendermint/tendermint=github.com/cometbft/cometbft@v0.34.27 +``` + +We make use of the original module URL in order to minimize the impact of +switching to CometBFT. This is only possible in our v0.34 release series, and we +will be switching our module URL to `github.com/cometbft/cometbft` in the next +major release. + +## Home directory + +CometBFT, by default, will consider its home directory in `~/.cometbft` from now +on instead of `~/.tendermint`. + +## Environment variables + +The environment variable prefixes have now changed from `TM` to `CMT`. For +example, `TMHOME` or `TM_HOME` become `CMTHOME` or `CMT_HOME`. + +We have implemented a fallback check in case `TMHOME` is still set and `CMTHOME` +is not, but you will start to see a warning message in the logs if the old +`TMHOME` variable is set. This fallback check will be removed entirely in a +subsequent major release of CometBFT. + +## Building CometBFT + +If you are building CometBFT from scratch, please note that it must be compiled +using Go 1.19 or higher. The use of Go 1.18 is not supported, since this version +has reached end-of-life with the release of Go 1.20. 
+ +[v03424]: https://github.com/tendermint/tendermint/releases/tag/v0.34.24 +[v03425]: https://github.com/informalsystems/tendermint/releases/tag/v0.34.25 +[v03426]: https://github.com/informalsystems/tendermint/releases/tag/v0.34.26 diff --git a/docs/introduction/README.md b/docs/introduction/README.md index 504c17b700..acafe992f5 100644 --- a/docs/introduction/README.md +++ b/docs/introduction/README.md @@ -5,22 +5,327 @@ parent: order: 1 --- -# Overview +# What is CometBFT -## Quick Start +CometBFT is software for securely and consistently replicating an +application on many machines. By securely, we mean that CometBFT works +as long as less than 1/3 of machines fail in arbitrary ways. By consistently, +we mean that every non-faulty machine sees the same transaction log and +computes the same state. Secure and consistent replication is a +fundamental problem in distributed systems; it plays a critical role in +the fault tolerance of a broad range of applications, from currencies, +to elections, to infrastructure orchestration, and beyond. -Get CometBFT up and running quickly with the -[quick-start guide](./quick-start.md)! +The ability to tolerate machines failing in arbitrary ways, including +becoming malicious, is known as Byzantine fault tolerance (BFT). The +theory of BFT is decades old, but software implementations have only +became popular recently, due largely to the success of "blockchain +technology" like Bitcoin and Ethereum. Blockchain technology is just a +reformalization of BFT in a more modern setting, with emphasis on +peer-to-peer networking and cryptographic authentication. The name +derives from the way transactions are batched in blocks, where each +block contains a cryptographic hash of the previous one, forming a +chain. -## Install +CometBFT consists of two chief technical components: a blockchain +consensus engine and a generic application interface. 
+The consensus engine, +which is based on [Tendermint consensus algorithm][tendermint-paper], +ensures that the same transactions are +recorded on every machine in the same order. The application interface, +called the Application BlockChain Interface (ABCI), delivers the transactions +to applications for processing. Unlike other +blockchain and consensus solutions, which come pre-packaged with built +in state machines (like a fancy key-value store, or a quirky scripting +language), developers can use CometBFT for BFT state machine +replication of applications written in whatever programming language and +development environment is right for them. -Detailed [installation instructions](./install.md). +CometBFT is designed to be easy-to-use, simple-to-understand, highly +performant, and useful for a wide variety of distributed applications. -## Upgrading from Tendermint Core +## CometBFT vs. X -See our [upgrading guidelines](./upgrading-from-tm.md) if you are interested in -switching to CometBFT from Tendermint Core. +CometBFT is broadly similar to two classes of software. The first +class consists of distributed key-value stores, like Zookeeper, etcd, +and consul, which use non-BFT consensus. The second class is known as +"blockchain technology", and consists of both cryptocurrencies like +Bitcoin and Ethereum, and alternative distributed ledger designs like +Hyperledger's Burrow. -## What is CometBFT +### Zookeeper, etcd, consul -Dive into [what CometBFT is and why](./what-is-cometbft.md)! +Zookeeper, etcd, and consul are all implementations of key-value stores +atop a classical, non-BFT consensus algorithm. Zookeeper uses an +algorithm called Zookeeper Atomic Broadcast, while etcd and consul use +the Raft log replication algorithm. A +typical cluster contains 3-5 machines, and can tolerate crash failures +in less than 1/2 of the machines (e.g., 1 out of 3 or 2 out of 5), +but even a single Byzantine fault can jeopardize the whole system. 
+ +Each offering provides a slightly different implementation of a +featureful key-value store, but all are generally focused around +providing basic services to distributed systems, such as dynamic +configuration, service discovery, locking, leader-election, and so on. + +CometBFT is in essence similar software, but with two key differences: + +- It is Byzantine Fault Tolerant, meaning it can only tolerate less than 1/3 + of machines failing, but those failures can include arbitrary behavior - + including hacking and malicious attacks. +- It does not specify a + particular application, like a fancy key-value store. Instead, it + focuses on arbitrary state machine replication, so developers can build + the application logic that's right for them, from key-value store to + cryptocurrency to e-voting platform and beyond. + +### Bitcoin, Ethereum, etc + +[Tendermint consensus algorithm][tendermint-paper], adopted by CometBFT, +emerged in the tradition of cryptocurrencies like Bitcoin, +Ethereum, etc. with the goal of providing a more efficient and secure +consensus algorithm than Bitcoin's Proof of Work. In the early days, +Tendermint consensus-based blockchains had a simple currency built in, and to participate in +consensus, users had to "bond" units of the currency into a security +deposit which could be revoked if they misbehaved -this is what made +Tendermint consensus a Proof-of-Stake algorithm. + +Since then, CometBFT has evolved to be a general purpose blockchain +consensus engine that can host arbitrary application states. That means +it can be used as a plug-and-play replacement for the consensus engines +of other blockchain software. So one can take the current Ethereum code +base, whether in Rust, or Go, or Haskell, and run it as an ABCI +application using CometBFT. Indeed, [we did that with +Ethereum](https://github.com/cosmos/ethermint). And we plan to do +the same for Bitcoin, ZCash, and various other deterministic +applications as well. 
+ +Another example of a cryptocurrency application built on CometBFT is +[the Cosmos network](http://cosmos.network). + +### Other Blockchain Projects + +[Fabric](https://github.com/hyperledger/fabric) takes a similar approach +to CometBFT, but is more opinionated about how the state is managed, +and requires that all application behavior runs in potentially many +docker containers, modules it calls "chaincode". It uses an +implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf). +from a team at IBM that is [augmented to handle potentially +non-deterministic +chaincode](https://drops.dagstuhl.de/opus/volltexte/2017/7093/pdf/LIPIcs-OPODIS-2016-24.pdf). +It is possible to implement this docker-based behavior as an ABCI app in +CometBFT, though extending CometBFT to handle non-determinism +remains for future work. + +[Burrow](https://github.com/hyperledger/burrow) is an implementation of +the Ethereum Virtual Machine and Ethereum transaction mechanics, with +additional features for a name-registry, permissions, and native +contracts, and an alternative blockchain API. It uses CometBFT as its +consensus engine, and provides a particular application state. + +## ABCI Overview + +The [Application BlockChain Interface +(ABCI)](https://github.com/cometbft/cometbft/tree/main/abci) +allows for Byzantine Fault Tolerant replication of applications +written in any programming language. + +### Motivation + +Thus far, all blockchains "stacks" (such as +[Bitcoin](https://github.com/bitcoin/bitcoin)) have had a monolithic +design. That is, each blockchain stack is a single program that handles +all the concerns of a decentralized ledger; this includes P2P +connectivity, the "mempool" broadcasting of transactions, consensus on +the most recent block, account balances, Turing-complete contracts, +user-level permissions, etc. + +Using a monolithic architecture is typically bad practice in computer +science. 
It makes it difficult to reuse components of the code, and +attempts to do so result in complex maintenance procedures for forks of +the codebase. This is especially true when the codebase is not modular +in design and suffers from "spaghetti code". + +Another problem with monolithic design is that it limits you to the +language of the blockchain stack (or vice versa). In the case of +Ethereum which supports a Turing-complete bytecode virtual-machine, it +limits you to languages that compile down to that bytecode; while the +[list](https://github.com/pirapira/awesome-ethereum-virtual-machine#programming-languages-that-compile-into-evm) +is growing, it is still very limited. + +In contrast, our approach is to decouple the consensus engine and P2P +layers from the details of the state of the particular +blockchain application. We do this by abstracting away the details of +the application to an interface, which is implemented as a socket +protocol. + +### Intro to ABCI + +[CometBFT](https://github.com/cometbft/cometbft), the +"consensus engine", communicates with the application via a socket +protocol that satisfies the ABCI, the CometBFT Socket Protocol. + +To draw an analogy, let's talk about a well-known cryptocurrency, +Bitcoin. Bitcoin is a cryptocurrency blockchain where each node +maintains a fully audited Unspent Transaction Output (UTXO) database. If +one wanted to create a Bitcoin-like system on top of ABCI, CometBFT +would be responsible for + +- Sharing blocks and transactions between nodes +- Establishing a canonical/immutable order of transactions + (the blockchain) + +The application will be responsible for + +- Maintaining the UTXO database +- Validating cryptographic signatures of transactions +- Preventing transactions from spending non-existent transactions +- Allowing clients to query the UTXO database. + +CometBFT is able to decompose the blockchain design by offering a very +simple API (i.e. 
the ABCI) between the application process and consensus +process. + +The ABCI consists of 3 primary message types that get delivered from the +core to the application. The application replies with corresponding +response messages. + +The messages are specified here: [ABCI Message +Types](https://github.com/cometbft/cometbft/blob/main/proto/tendermint/abci/types.proto). + +The **DeliverTx** message is the work horse of the application. Each +transaction in the blockchain is delivered with this message. The +application needs to validate each transaction received with the +**DeliverTx** message against the current state, application protocol, +and the cryptographic credentials of the transaction. A validated +transaction then needs to update the application state — by binding a +value into a key values store, or by updating the UTXO database, for +instance. + +The **CheckTx** message is similar to **DeliverTx**, but it's only for +validating transactions. CometBFT's mempool first checks the +validity of a transaction with **CheckTx**, and only relays valid +transactions to its peers. For instance, an application may check an +incrementing sequence number in the transaction and return an error upon +**CheckTx** if the sequence number is old. Alternatively, they might use +a capabilities based system that requires capabilities to be renewed +with every transaction. + +The **Commit** message is used to compute a cryptographic commitment to +the current application state, to be placed into the next block header. +This has some handy properties. Inconsistencies in updating that state +will now appear as blockchain forks which catches a whole class of +programming errors. This also simplifies the development of secure +lightweight clients, as Merkle-hash proofs can be verified by checking +against the block hash, and that the block hash is signed by a quorum. + +There can be multiple ABCI socket connections to an application. 
+CometBFT creates three ABCI connections to the application; one +for the validation of transactions when broadcasting in the mempool, one +for the consensus engine to run block proposals, and one more for +querying the application state. + +It's probably evident that applications designers need to very carefully +design their message handlers to create a blockchain that does anything +useful but this architecture provides a place to start. The diagram +below illustrates the flow of messages via ABCI. + +![abci](../imgs/abci.png) + +## A Note on Determinism + +The logic for blockchain transaction processing must be deterministic. +If the application logic weren't deterministic, consensus would not be +reached among the CometBFT replica nodes. + +Solidity on Ethereum is a great language of choice for blockchain +applications because, among other reasons, it is a completely +deterministic programming language. However, it's also possible to +create deterministic applications using existing popular languages like +Java, C++, Python, or Go, by avoiding +sources of non-determinism such as: + +- random number generators (without deterministic seeding) +- race conditions on threads (or avoiding threads altogether) +- the system clock +- uninitialized memory (in unsafe programming languages like C + or C++) +- [floating point + arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/) +- language features that are random (e.g. map iteration in Go) + +While programmers can avoid non-determinism by being careful, it is also +possible to create a special linter or static analyzer for each language +to check for determinism. In the future we may work with partners to +create such tools. + +## Consensus Overview + +CometBFT adopts [Tendermint consensus][tendermint-paper], +an easy-to-understand, mostly asynchronous, BFT consensus algorithm. 
+The algorithm follows a simple state machine that looks like this: + +![consensus-logic](../imgs/consensus_logic.png) + +Participants in the algorithm are called **validators**; they take turns +proposing blocks of transactions and voting on them. Blocks are +committed in a chain, with one block at each **height**. A block may +fail to be committed, in which case the algorithm moves to the next +**round**, and a new validator gets to propose a block for that height. +Two stages of voting are required to successfully commit a block; we +call them **pre-vote** and **pre-commit**. + +There is a picture of a couple doing the polka because validators are +doing something like a polka dance. When more than two-thirds of the +validators pre-vote for the same block, we call that a **polka**. Every +pre-commit must be justified by a polka in the same round. +A block is committed when +more than 2/3 of validators pre-commit for the same block in the same +round. + +Validators may fail to commit a block for a number of reasons; the +current proposer may be offline, or the network may be slow. Tendermint consensus +allows them to establish that a validator should be skipped. Validators +wait a small amount of time to receive a complete proposal block from +the proposer before voting to move to the next round. This reliance on a +timeout is what makes Tendermint consensus a weakly synchronous algorithm, rather +than an asynchronous one. However, the rest of the algorithm is +asynchronous, and validators only make progress after hearing from more +than two-thirds of the validator set. A simplifying element of +Tendermint consensus is that it uses the same mechanism to commit a block as it +does to skip to the next round. + +Assuming less than one-third of the validators are Byzantine, Tendermint consensus algorithm +guarantees that safety will never be violated - that is, validators will +never commit conflicting blocks at the same height. 
To do this it +introduces a few **locking** rules which modulate which paths can be +followed in the flow diagram. Once a validator precommits a block, it is +locked on that block. Then, + +1. it must prevote for the block it is locked on +2. it can only unlock, and precommit for a new block, if there is a + polka for that block in a later round + +## Stake + +In many systems, not all validators will have the same "weight" in the +consensus protocol. Thus, we are not so much interested in one-third or +two-thirds of the validators, but in those proportions of the total +voting power, which may not be uniformly distributed across individual +validators. + +Since CometBFT can replicate arbitrary applications, it is possible to +define a currency, and denominate the voting power in that currency. +When voting power is denominated in a native currency, the system is +often referred to as Proof-of-Stake. Validators can be forced, by logic +in the application, to "bond" their currency holdings in a security +deposit that can be destroyed if they're found to misbehave in the +consensus protocol. This adds an economic element to the security of the +protocol, allowing one to quantify the cost of violating the assumption +that less than one-third of voting power is Byzantine. + +The [Cosmos Network](https://cosmos.network) is designed to use this +Proof-of-Stake mechanism across an array of cryptocurrencies implemented +as ABCI applications. 
+ +[tendermint-paper]: https://arxiv.org/abs/1807.04938 diff --git a/docs/qa/CometBFT-QA-34.md b/docs/qa/CometBFT-QA-34.md new file mode 100644 index 0000000000..d633426407 --- /dev/null +++ b/docs/qa/CometBFT-QA-34.md @@ -0,0 +1,370 @@ +--- +order: 1 +parent: + title: CometBFT QA Results v0.34.x + description: This is a report on the results obtained when running v0.34.x on testnets + order: 3 +--- + +# CometBFT QA Results v0.34.x + +## v0.34.x - From Tendermint Core to CometBFT + +This section reports on the QA process we followed before releasing the first `v0.34.x` version +from our CometBFT repository. + +The changes with respect to the last version of `v0.34.x` +(namely `v0.34.26`, released from the Informal Systems' Tendermint Core fork) +are minimal, and focus on rebranding our fork of Tendermint Core to CometBFT at places +where there is no substantial risk of breaking compatibility +with earlier Tendermint Core versions of `v0.34.x`. + +Indeed, CometBFT versions of `v0.34.x` (`v0.34.27` and subsequent) should fulfill +the following compatibility-related requirements. + +* Operators can easily upgrade a `v0.34.x` version of Tendermint Core to CometBFT. +* Upgrades from Tendermint Core to CometBFT can be uncoordinated for versions of the `v0.34.x` branch. +* Nodes running CometBFT must be interoperable with those running Tendermint Core in the same chain, + as long as all are running a `v0.34.x` version. + +These QA tests focus on the third bullet, whereas the first two bullets are tested using our _e2e tests_. + +It would be prohibitively time consuming to test mixed networks of all combinations of existing `v0.34.x` +versions, combined with the CometBFT release candidate under test. +Therefore our testing focuses on the last Tendermint Core version (`v0.34.26`) and the CometBFT release +candidate under test. + +We run the _200 node test_, but not the _rotating node test_. 
The effort of running the latter +is not justified given the amount and nature of the changes we are testing with respect to the +full QA cycle run previously on `v0.34.x`. +Since the changes to the system's logic are minimal, we are interested in these performance requirements: + +* The CometBFT release candidate under test performs similarly to Tendermint Core (i.e., the baseline) + * when used at scale (i.e., in a large network of CometBFT nodes) + * when used at scale in a mixed network (i.e., some nodes are running CometBFT + and others are running an older Tendermint Core version) + +Therefore we carry out a complete run of the _200-node test_ on the following networks: + +* A homogeneous 200-node testnet, where all nodes are running the CometBFT release candidate under test. +* A mixed network where 1/2 (99 out of 200) of the nodes are running the CometBFT release candidate under test, + and the rest (101 out of 200) are running Tendermint Core `v0.34.26`. +* A mixed network where 1/3 (66 out of 200) of the nodes are running the CometBFT release candidate under test, + and the rest (134 out of 200) are running Tendermint Core `v0.34.26`. +* A mixed network where 2/3 (133 out of 200) of the nodes are running the CometBFT release candidate under test, + and the rest (67 out of 200) are running Tendermint Core `v0.34.26`. + +## Configuration and Results +In the following sections we provide the results of the _200 node test_. +Each section reports the baseline results (for reference), the homogeneous network scenario (all CometBFT nodes), +and the mixed networks with 1/2, 1/3 and 2/3 of Tendermint Core nodes. + +### Saturation Point + +As the CometBFT release candidate under test has minimal changes +with respect to Tendermint Core `v0.34.26`, other than the rebranding changes, +we can confidently reuse the results from the `v0.34.x` baseline test regarding +the [saturation point](TMCore-QA-34.md#finding-the-saturation-point). 
+ +Therefore, we will simply use a load of (`r=200,c=2`) +(see the explanation [here](TMCore-QA-34.md#finding-the-saturation-point)) on all experiments. + +We also include the baseline results for quick reference and comparison. + +### Experiments + +On each of the three networks, the test consists of 4 experiments, with the goal of +ensuring the data obtained is consistent across experiments. + +On each of the networks, we pick only one representative run to present and discuss the +results. + + +## Examining latencies +For each network the figures plot the four experiments carried out with the network. +We can see that the latencies follow comparable patterns across all experiments. + +Unique identifiers (UUIDs) for each execution are presented on top of each graph. +We refer to these UUIDs to indicate the representative runs. + +### CometBFT Homogeneous network + +![latencies](img34/homogeneous/all_experiments.png) + +### 1/2 Tendermint Core - 1/2 CometBFT + +![latencies](img34/cmt1tm1/all_experiments.png) + +### 1/3 Tendermint Core - 2/3 CometBFT + +![latencies](img34/cmt2tm1/all_experiments.png) + +### 2/3 Tendermint Core - 1/3 CometBFT + +![latencies_all_tm2_3_cmt1_3](img34/v034_200node_tm2cmt1/all_experiments.png) + + +## Prometheus Metrics + +This section reports on the key Prometheus metrics extracted from the following experiments. + +* Baseline results: `v0.34.x`, obtained in October 2022 and reported [here](TMCore-QA-34.md). +* CometBFT homogeneous network: experiment with UUID starting with `be8c`. +* Mixed network, 1/2 Tendermint Core `v0.34.26` and 1/2 running CometBFT: experiment with UUID starting with `04ee`. +* Mixed network, 1/3 Tendermint Core `v0.34.26` and 2/3 running CometBFT: experiment with UUID starting with `fc5e`. +* Mixed network, 2/3 Tendermint Core `v0.34.26` and 1/3 running CometBFT: experiment with UUID starting with `4759`. 
+ +We make explicit comparisons between the baseline and the homogeneous setups, but refrain from +commenting on the mixed network experiments unless they show some exceptional results. + +### Mempool Size + +For each reported experiment we show two graphs. +The first shows the evolution over time of the cumulative number of transactions +inside all full nodes' mempools at a given time. + +The second one shows the evolution of the average over all full nodes. + +#### Baseline + +![mempool-cumulative](img34/baseline/mempool_size.png) + +![mempool-avg](img34/baseline/avg_mempool_size.png) + +#### CometBFT Homogeneous network + +The results for the homogeneous network and the baseline are similar in terms of outstanding transactions. + +![mempool-cumulative-homogeneous](img34/homogeneous/mempool_size.png) + +![mempool-avg-homogeneous](img34/homogeneous/avg_mempool_size.png) + +#### 1/2 Tendermint Core - 1/2 CometBFT + +![mempool size](img34/cmt1tm1/mempool_size.png) + +![average mempool size](img34/cmt1tm1/avg_mempool_size.png) + +#### 1/3 Tendermint Core - 2/3 CometBFT + +![mempool size](img34/cmt2tm1/mempool_size.png) + +![average mempool size](img34/cmt2tm1/avg_mempool_size.png) + +#### 2/3 Tendermint Core - 1/3 CometBFT + +![mempool_tm2_3_cmt_1_3](img34/v034_200node_tm2cmt1/mempool_size.png) + +![mempool-avg_tm2_3_cmt_1_3](img34/v034_200node_tm2cmt1/avg_mempool_size.png) + +### Consensus Rounds per Height + +The following graphs show the rounds needed to complete each height and agree on a block. + +A value of `0` shows that only one round was required (with id `0`), and a value of `1` shows that two rounds were required. + +#### Baseline +We can see that round 1 is reached with a certain frequency. + +![rounds](img34/baseline/rounds.png) + +#### CometBFT Homogeneous network + +Most heights finished in round 0, some nodes needed to advance to round 1 at various moments, +and a few nodes even needed to advance to round 2 at one point. 
+This coincides with the time at which we observed the biggest peak in mempool size +on the corresponding plot, shown above. + +![rounds-homogeneous](img34/homogeneous/rounds.png) + +#### 1/2 Tendermint Core - 1/2 CometBFT + +![peers](img34/cmt1tm1/rounds.png) + +#### 1/3 Tendermint Core - 2/3 CometBFT + +![peers](img34/cmt2tm1/rounds.png) + +#### 2/3 Tendermint Core - 1/3 CometBFT + +![rounds-tm2_3_cmt1_3](img34/v034_200node_tm2cmt1/rounds.png) + +### Peers + +The following plots show how many peers a node had throughout the experiment. + +The thick red dashed line represents the moving average over a sliding window of 20 seconds. + +#### Baseline + +The following graph shows that the number of peers was stable throughout the experiment. +Seed nodes typically have a higher number of peers. +The fact that non-seed nodes reach more than 50 peers is due to +[#9548](https://github.com/tendermint/tendermint/issues/9548). + +![peers](img34/baseline/peers.png) + +#### CometBFT Homogeneous network + +The results for the homogeneous network are very similar to the baseline. +The only difference being that the seed nodes seem to lose peers in the middle of the experiment. +However this cannot be attributed to the differences in the code, which are mainly rebranding. + +![peers-homogeneous](img34/homogeneous/peers.png) + +#### 1/2 Tendermint Core - 1/2 CometBFT + +![peers](img34/cmt1tm1/peers.png) + +#### 1/3 Tendermint Core - 2/3 CometBFT + +![peers](img34/cmt2tm1/peers.png) + +#### 2/3 Tendermint Core - 1/3 CometBFT + +As in the homogeneous case, there is some variation in the number of peers for some nodes. +These, however, do not affect the average. + +![peers-tm2_3_cmt1_3](img34/v034_200node_tm2cmt1/peers.png) + +### Blocks Produced per Minute, Transactions Processed per Minute + +The following plots show the rate of block production and the rate of transactions delivered, throughout the experiments. 
+ +In both graphs, rates are calculated over a sliding window of 20 seconds. +The thick red dashed lines show the rates' moving averages. + +#### Baseline + +The average number of blocks/minute oscillates between 10 and 40. + +![heights](img34/baseline/block_rate_regular.png) + +The number of transactions/minute tops around 30k. + +![total-txs](img34/baseline/total_txs_rate_regular.png) + + +#### CometBFT Homogeneous network + +The plot showing the block production rate shows that the rate oscillates around 20 blocks/minute, +mostly within the same range as the baseline. + +![heights-homogeneous-rate](img34/homogeneous/block_rate_regular.png) + +The plot showing the transaction rate shows the rate stays around 20000 transactions per minute, +also topping around 30k. + +![txs-homogeneous-rate](img34/homogeneous/total_txs_rate_regular.png) + +#### 1/2 Tendermint Core - 1/2 CometBFT + +![height rate](img34/cmt1tm1/block_rate_regular.png) + +![transaction rate](img34/cmt1tm1/total_txs_rate_regular.png) + +#### 1/3 Tendermint Core - 2/3 CometBFT + +![height rate](img34/cmt2tm1/block_rate_regular.png) + +![transaction rate](img34/cmt2tm1/total_txs_rate_regular.png) + +#### 2/3 Tendermint Core - 1/3 CometBFT + +![height rate](img34/v034_200node_tm2cmt1/block_rate_regular.png) + +![transaction rate](img34/v034_200node_tm2cmt1/total_txs_rate_regular.png) + +### Memory Resident Set Size + +The following graphs show the Resident Set Size (RSS) of all monitored processes and the average value. + +#### Baseline + +![rss](img34/baseline/memory.png) + +![rss-avg](img34/baseline/avg_memory.png) + +#### CometBFT Homogeneous network + +This is the plot for the homogeneous network, which is slightly more stable than the baseline over +the time of the experiment. + +![rss-homogeneous](img34/homogeneous/memory.png) + +And this is the average plot. It oscillates around 560 MiB, which is noticeably lower than the baseline. 
+ +![rss-avg-homogeneous](img34/homogeneous/avg_memory.png) + +#### 1/2 Tendermint Core - 1/2 CometBFT + +![rss](img34/cmt1tm1/memory.png) + +![rss average](img34/cmt1tm1/avg_memory.png) + +#### 1/3 Tendermint Core - 2/3 CometBFT + +![rss](img34/cmt2tm1/memory.png) + +![rss average](img34/cmt2tm1/avg_memory.png) + +#### 2/3 Tendermint Core - 1/3 CometBFT + +![rss](img34/v034_200node_tm2cmt1/memory.png) + +![rss average](img34/v034_200node_tm2cmt1/avg_memory.png) + +### CPU utilization + +The following graphs show the `load1` of nodes, as typically shown in the first line of the Unix `top` +command, and their average value. + +#### Baseline + +![load1](img34/baseline/cpu.png) + +![load1-avg](img34/baseline/avg_cpu.png) + +#### CometBFT Homogeneous network + +The load in the homogeneous network is, similarly to the baseline case, below 5 and, therefore, normal. + +![load1-homogeneous](img34/homogeneous/cpu.png) + +As expected, the average plot also looks similar. + +![load1-homogeneous-avg](img34/homogeneous/avg_cpu.png) + +#### 1/2 Tendermint Core - 1/2 CometBFT + +![load1](img34/cmt1tm1/cpu.png) + +![average load1](img34/cmt1tm1/avg_cpu.png) + +#### 1/3 Tendermint Core - 2/3 CometBFT + +![load1](img34/cmt2tm1/cpu.png) + +![average load1](img34/cmt2tm1/avg_cpu.png) + +#### 2/3 Tendermint Core - 1/3 CometBFT + +![load1](img34/v034_200node_tm2cmt1/cpu.png) + +![average load1](img34/v034_200node_tm2cmt1/avg_cpu.png) + +## Test Results + +The comparison of the baseline results and the homogeneous case shows that both scenarios had similar numbers and are therefore equivalent. + +The mixed nodes cases show that networks operate normally with a mix of compatible Tendermint Core and CometBFT versions. +Although not the main goal, a comparison of metric numbers with the homogeneous case and the baseline scenarios show similar results and therefore we can conclude that mixing compatible Tendermint Core and CometBFT introduces no performance degradation. 
+ +A conclusion of these tests is shown in the following table, along with the commit versions used in the experiments. + +| Scenario | Date | Version | Result | +|--|--|--|--| +|CometBFT Homogeneous network | 2023-02-08 | 3b783434f26b0e87994e6a77c5411927aad9ce3f | Pass +|1/2 Tendermint Core
1/2 CometBFT | 2023-02-14 | CometBFT: 3b783434f26b0e87994e6a77c5411927aad9ce3f
Tendermint Core: 66c2cb63416e66bff08e11f9088e21a0ed142790 | Pass| +|1/3 Tendermint Core
2/3 CometBFT | 2023-02-08 | CometBFT: 3b783434f26b0e87994e6a77c5411927aad9ce3f
Tendermint Core: 66c2cb63416e66bff08e11f9088e21a0ed142790 | Pass| +|2/3 Tendermint Core
1/3 CometBFT | 2023-02-08 | CometBFT: 3b783434f26b0e87994e6a77c5411927aad9ce3f
Tendermint Core: 66c2cb63416e66bff08e11f9088e21a0ed142790 | Pass | diff --git a/docs/qa/README.md b/docs/qa/README.md index c5f278cf3f..e4068920d1 100644 --- a/docs/qa/README.md +++ b/docs/qa/README.md @@ -19,5 +19,5 @@ used to decide if a release is passing the Quality Assurance process. The results obtained in each release are stored in their own directory. The following releases have undergone the Quality Assurance process, and the corresponding reports include detailed information on tests and comparison with the baseline. -* [TM v0.34.x](./v034/TMCore.md) - Tested prior to releasing Tendermint Core v0.34.22. -* [v0.34.x](./v034/README.md) - Tested prior to releasing v0.34.27, using TM v0.34.x results as baseline. \ No newline at end of file +* [TM v0.34.x](TMCore-QA-34.md) - Tested prior to releasing Tendermint Core v0.34.22. +* [v0.34.x](CometBFT-QA-34.md) - Tested prior to releasing v0.34.27, using TM v0.34.x results as baseline. diff --git a/docs/qa/TMCore-QA-34.md b/docs/qa/TMCore-QA-34.md new file mode 100644 index 0000000000..e5764611c0 --- /dev/null +++ b/docs/qa/TMCore-QA-34.md @@ -0,0 +1,277 @@ +--- +order: 1 +parent: + title: Tendermint Core QA Results v0.34.x + description: This is a report on the results obtained when running v0.34.x on testnets + order: 2 +--- + +# Tendermint Core QA Results v0.34.x + +## 200 Node Testnet + +### Finding the Saturation Point + +The first goal when examining the results of the tests is identifying the saturation point. +The saturation point is a setup with a transaction load big enough to prevent the testnet +from being stable: the load runner tries to produce slightly more transactions than can +be processed by the testnet. + +The following table summarizes the results for v0.34.x, for the different experiments +(extracted from file [`v034_report_tabbed.txt`](img34/v034_report_tabbed.txt)). + +The X axis of this table is `c`, the number of connections created by the load runner process to the target node. 
+The Y axis of this table is `r`, the rate or number of transactions issued per second. + +| | c=1 | c=2 | c=4 | +| :--- | ----: | ----: | ----: | +| r=25 | 2225 | 4450 | 8900 | +| r=50 | 4450 | 8900 | 17800 | +| r=100 | 8900 | 17800 | 35600 | +| r=200 | 17800 | 35600 | 38660 | + +The table shows the number of 1024-byte-long transactions that were produced by the load runner, +and processed by Tendermint Core, during the 90 seconds of the experiment's duration. +Each cell in the table refers to an experiment with a particular number of websocket connections (`c`) +to a chosen validator, and the number of transactions per second that the load runner +tries to produce (`r`). Note that the overall load that the tool attempts to generate is $c \cdot r$. + +We can see that the saturation point is beyond the diagonal that spans cells + +* `r=200,c=2` +* `r=100,c=4` + +given that the total number of transactions should be close to the product of the rate, the number of connections, and the experiment time. + +All experiments below the saturation diagonal (`r=200,c=4`) have in common that the total +number of transactions processed is noticeably less than the product $c \cdot r \cdot 89$ (89 seconds, since the last batch never gets sent), +which is the expected number of transactions when the system is able to deal well with the +load. +With (`r=200,c=4`), we obtained 38660 whereas the theoretical number of transactions should +have been $200 \cdot 4 \cdot 89 = 71200$. + +At this point, we chose an experiment at the limit of the saturation diagonal, +in order to further study the performance of this release. +**The chosen experiment is (`r=200,c=2`)**. + +This is a plot of the CPU load (average over 1 minute, as output by `top`) of the load runner for (`r=200,c=2`), +where we can see that the load stays close to 0 most of the time.
+ +![load-load-runner](img34/v034_r200c2_load-runner.png) + +### Examining latencies + +The method described [here](method.md) allows us to plot the latencies of transactions +for all experiments. + +![all-latencies](img34/v034_200node_latencies.png) + +As we can see, even the experiments beyond the saturation diagonal managed to keep +transaction latency stable (i.e. not constantly increasing). +Our interpretation for this is that contention within Tendermint Core was propagated, +via the websockets, to the load runner, +hence the load runner could not produce the target load, but a fraction of it. + +Further examination of the Prometheus data (see below), showed that the mempool contained many transactions +at steady state, but did not grow much without quickly returning to this steady state. This demonstrates +that Tendermint Core network was able to process transactions at least as quickly as they +were submitted to the mempool. Finally, the test script made sure that, at the end of an experiment, the +mempool was empty so that all transactions submitted to the chain were processed. + +Finally, the number of points present in the plot appears to be much less than expected given the +number of transactions in each experiment, particularly close to or above the saturation diagonal. +This is a visual effect of the plot; what appear to be points in the plot are actually potentially huge +clusters of points. To corroborate this, we have zoomed in the plot above by setting (carefully chosen) +tiny axis intervals. The cluster shown below looks like a single point in the plot above. + +![all-latencies-zoomed](img34/v034_200node_latencies_zoomed.png) + +The plot of latencies can be used as a baseline to compare with other releases. + +The following plot summarizes average latencies versus overall throughput +across different numbers of WebSocket connections to the node into which +transactions are being loaded.
+ +![latency-vs-throughput](img34/v034_latency_throughput.png) + +### Prometheus Metrics on the Chosen Experiment + +As mentioned [above](#finding-the-saturation-point), the chosen experiment is `r=200,c=2`. +This section further examines key metrics for this experiment extracted from Prometheus data. + +#### Mempool Size + +The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous +at all full nodes. It did not exhibit any unconstrained growth. +The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools +at a given time. +The two spikes that can be observed correspond to a period where consensus instances proceeded beyond the initial round +at some nodes. + +![mempool-cumulative](img34/v034_r200c2_mempool_size.png) + +The plot below shows evolution of the average over all full nodes, which oscillates between 1500 and 2000 +outstanding transactions. + +![mempool-avg](img34/v034_r200c2_mempool_size_avg.png) + +The peaks observed coincide with the moments when some nodes proceeded beyond the initial round of consensus (see below). + +#### Peers + +The number of peers was stable at all nodes. +It was higher for the seed nodes (around 140) than for the rest (between 21 and 74). +The fact that non-seed nodes reach more than 50 peers is due to #9548. + +![peers](img34/v034_r200c2_peers.png) + +#### Consensus Rounds per Height + +Most nodes used only round 0 for most heights, but some nodes needed to advance to round 1 for some heights. + +![rounds](img34/v034_r200c2_rounds.png) + +#### Blocks Produced per Minute, Transactions Processed per Minute + +The blocks produced per minute are the slope of this plot. + +![heights](img34/v034_r200c2_heights.png) + +Over a period of 2 minutes, the height goes from 530 to 569. +This results in an average of 19.5 blocks produced per minute. + +The transactions processed per minute are the slope of this plot. 
+ +![total-txs](img34/v034_r200c2_total-txs.png) + +Over a period of 2 minutes, the total goes from 64525 to 100125 transactions, +resulting in 17800 transactions per minute. However, we can see in the plot that +all transactions in the load are processed long before the two minutes. +If we adjust the time window when transactions are processed (approx. 105 seconds), +we obtain 20343 transactions per minute. + +#### Memory Resident Set Size + +Resident Set Size of all monitored processes is plotted below. + +![rss](img34/v034_r200c2_rss.png) + +The average over all processes oscillates around 1.2 GiB and does not demonstrate unconstrained growth. + +![rss-avg](img34/v034_r200c2_rss_avg.png) + +#### CPU utilization + +The best metric from Prometheus to gauge CPU utilization in a Unix machine is `load1`, +as it usually appears in the +[output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux). + +![load1](img34/v034_r200c2_load1.png) + +It is contained in most cases below 5, which is generally considered acceptable load. + +### Test Result + +**Result: N/A** (v0.34.x is the baseline) + +Date: 2022-10-14 + +Version: 3ec6e424d6ae4c96867c2dcf8310572156068bb6 + +## Rotating Node Testnet + +For this testnet, we will use a load that can safely be considered below the saturation +point for the size of this testnet (between 13 and 38 full nodes): `c=4,r=800`. + +N.B.: The version of CometBFT used for these tests is affected by #9539. +However, the reduced load that reaches the mempools is orthogonal to functionality +we are focusing on here. + +### Latencies + +The plot of all latencies can be seen in the following plot. + +![rotating-all-latencies](img34/v034_rotating_latencies.png) + +We can observe there are some very high latencies, towards the end of the test. +Upon suspicion that they are duplicate transactions, we examined the latencies +raw file and discovered there are more than 100K duplicate transactions. 
+ +The following plot shows the latencies file where all duplicate transactions have +been removed, i.e., only the first occurrence of a duplicate transaction is kept. + +![rotating-all-latencies-uniq](img34/v034_rotating_latencies_uniq.png) + +This problem, existing in `v0.34.x`, will need to be addressed, perhaps in the same way +we addressed it when running the 200 node test with high loads: increasing the `cache_size` +configuration parameter. + +### Prometheus Metrics + +The set of metrics shown here are less than for the 200 node experiment. +We are only interested in those for which the catch-up process (blocksync) may have an impact. + +#### Blocks and Transactions per minute + +Just as shown for the 200 node test, the blocks produced per minute are the gradient of this plot. + +![rotating-heights](img34/v034_rotating_heights.png) + +Over a period of 5229 seconds, the height goes from 2 to 3638. +This results in an average of 41 blocks produced per minute. + +The following plot shows only the heights reported by ephemeral nodes +(which are also included in the plot above). Note that the _height_ metric +is only showed _once the node has switched to consensus_, hence the gaps +when nodes are killed, wiped out, started from scratch, and catching up. + +![rotating-heights-ephe](img34/v034_rotating_heights_ephe.png) + +The transactions processed per minute are the gradient of this plot. + +![rotating-total-txs](img34/v034_rotating_total-txs.png) + +The small lines we see periodically close to `y=0` are the transactions that +ephemeral nodes start processing when they are caught up. + +Over a period of 5229 seconds, the total goes from 0 to 387697 transactions, +resulting in 4449 transactions per minute. We can see some abrupt changes in +the plot's gradient. This will need to be investigated. + +#### Peers + +The plot below shows the evolution in peers throughout the experiment.
+The periodic changes observed are due to the ephemeral nodes being stopped, +wiped out, and recreated. + +![rotating-peers](img34/v034_rotating_peers.png) + +The validators' plots are concentrated at the higher part of the graph, whereas the ephemeral nodes +are mostly at the lower part. + +#### Memory Resident Set Size + +The average Resident Set Size (RSS) over all processes seems stable, and slightly growing toward the end. +This might be related to the increase in transaction load observed above. + +![rotating-rss-avg](img34/v034_rotating_rss_avg.png) + +The memory taken by the validators and the ephemeral nodes (when they are up) is comparable. + +#### CPU utilization + +The plot shows metric `load1` for all nodes. + +![rotating-load1](img34/v034_rotating_load1.png) + +It is contained under 5 most of the time, which is considered normal load. +The purple line, which follows a different pattern, is the validator receiving all +transactions, via RPC, from the load runner process. + +### Test Result + +**Result: N/A** + +Date: 2022-10-10 + +Version: a28c987f5a604ff66b515dd415270063e6fb069d diff --git a/docs/qa/img34/baseline/avg_cpu.png b/docs/qa/img34/baseline/avg_cpu.png new file mode 100644 index 0000000000..622456df64 Binary files /dev/null and b/docs/qa/img34/baseline/avg_cpu.png differ diff --git a/docs/qa/img34/baseline/avg_memory.png b/docs/qa/img34/baseline/avg_memory.png new file mode 100644 index 0000000000..55f213f5e1 Binary files /dev/null and b/docs/qa/img34/baseline/avg_memory.png differ diff --git a/docs/qa/img34/baseline/avg_mempool_size.png b/docs/qa/img34/baseline/avg_mempool_size.png new file mode 100644 index 0000000000..ec74072950 Binary files /dev/null and b/docs/qa/img34/baseline/avg_mempool_size.png differ diff --git a/docs/qa/img34/baseline/block_rate_regular.png b/docs/qa/img34/baseline/block_rate_regular.png new file mode 100644 index 0000000000..bdc7aa28d7 Binary files /dev/null and b/docs/qa/img34/baseline/block_rate_regular.png
differ diff --git a/docs/qa/img34/baseline/cpu.png b/docs/qa/img34/baseline/cpu.png new file mode 100644 index 0000000000..ac4fc2695f Binary files /dev/null and b/docs/qa/img34/baseline/cpu.png differ diff --git a/docs/qa/img34/baseline/memory.png b/docs/qa/img34/baseline/memory.png new file mode 100644 index 0000000000..17336bd1b9 Binary files /dev/null and b/docs/qa/img34/baseline/memory.png differ diff --git a/docs/qa/img34/baseline/mempool_size.png b/docs/qa/img34/baseline/mempool_size.png new file mode 100644 index 0000000000..fafba68c1a Binary files /dev/null and b/docs/qa/img34/baseline/mempool_size.png differ diff --git a/docs/qa/img34/baseline/peers.png b/docs/qa/img34/baseline/peers.png new file mode 100644 index 0000000000..05a288a356 Binary files /dev/null and b/docs/qa/img34/baseline/peers.png differ diff --git a/docs/qa/img34/baseline/rounds.png b/docs/qa/img34/baseline/rounds.png new file mode 100644 index 0000000000..79f3348a25 Binary files /dev/null and b/docs/qa/img34/baseline/rounds.png differ diff --git a/docs/qa/img34/baseline/total_txs_rate_regular.png b/docs/qa/img34/baseline/total_txs_rate_regular.png new file mode 100644 index 0000000000..d80bef12c0 Binary files /dev/null and b/docs/qa/img34/baseline/total_txs_rate_regular.png differ diff --git a/docs/qa/img34/cmt1tm1/all_experiments.png b/docs/qa/img34/cmt1tm1/all_experiments.png new file mode 100644 index 0000000000..4dc857edca Binary files /dev/null and b/docs/qa/img34/cmt1tm1/all_experiments.png differ diff --git a/docs/qa/img34/cmt1tm1/avg_cpu.png b/docs/qa/img34/cmt1tm1/avg_cpu.png new file mode 100644 index 0000000000..cabd273a55 Binary files /dev/null and b/docs/qa/img34/cmt1tm1/avg_cpu.png differ diff --git a/docs/qa/img34/cmt1tm1/avg_memory.png b/docs/qa/img34/cmt1tm1/avg_memory.png new file mode 100644 index 0000000000..c8e5761772 Binary files /dev/null and b/docs/qa/img34/cmt1tm1/avg_memory.png differ diff --git a/docs/qa/img34/cmt1tm1/avg_mempool_size.png 
b/docs/qa/img34/cmt1tm1/avg_mempool_size.png new file mode 100644 index 0000000000..b41199dc00 Binary files /dev/null and b/docs/qa/img34/cmt1tm1/avg_mempool_size.png differ diff --git a/docs/qa/img34/cmt1tm1/block_rate_regular.png b/docs/qa/img34/cmt1tm1/block_rate_regular.png new file mode 100644 index 0000000000..9b3a0b8276 Binary files /dev/null and b/docs/qa/img34/cmt1tm1/block_rate_regular.png differ diff --git a/docs/qa/img34/cmt1tm1/cpu.png b/docs/qa/img34/cmt1tm1/cpu.png new file mode 100644 index 0000000000..cd5acdeb29 Binary files /dev/null and b/docs/qa/img34/cmt1tm1/cpu.png differ diff --git a/docs/qa/img34/cmt1tm1/memory.png b/docs/qa/img34/cmt1tm1/memory.png new file mode 100644 index 0000000000..6f56b3ccf1 Binary files /dev/null and b/docs/qa/img34/cmt1tm1/memory.png differ diff --git a/docs/qa/img34/cmt1tm1/mempool_size.png b/docs/qa/img34/cmt1tm1/mempool_size.png new file mode 100644 index 0000000000..862a0bdd49 Binary files /dev/null and b/docs/qa/img34/cmt1tm1/mempool_size.png differ diff --git a/docs/qa/img34/cmt1tm1/peers.png b/docs/qa/img34/cmt1tm1/peers.png new file mode 100644 index 0000000000..737cf3dffb Binary files /dev/null and b/docs/qa/img34/cmt1tm1/peers.png differ diff --git a/docs/qa/img34/cmt1tm1/rounds.png b/docs/qa/img34/cmt1tm1/rounds.png new file mode 100644 index 0000000000..17884813af Binary files /dev/null and b/docs/qa/img34/cmt1tm1/rounds.png differ diff --git a/docs/qa/img34/cmt1tm1/total_txs_rate_regular.png b/docs/qa/img34/cmt1tm1/total_txs_rate_regular.png new file mode 100644 index 0000000000..8b0cc0d426 Binary files /dev/null and b/docs/qa/img34/cmt1tm1/total_txs_rate_regular.png differ diff --git a/docs/qa/img34/cmt2tm1/all_experiments.png b/docs/qa/img34/cmt2tm1/all_experiments.png new file mode 100644 index 0000000000..4e6f73d355 Binary files /dev/null and b/docs/qa/img34/cmt2tm1/all_experiments.png differ diff --git a/docs/qa/img34/cmt2tm1/avg_cpu.png b/docs/qa/img34/cmt2tm1/avg_cpu.png new file mode 100644 
index 0000000000..92fea31bd1 Binary files /dev/null and b/docs/qa/img34/cmt2tm1/avg_cpu.png differ diff --git a/docs/qa/img34/cmt2tm1/avg_memory.png b/docs/qa/img34/cmt2tm1/avg_memory.png new file mode 100644 index 0000000000..f362798d8f Binary files /dev/null and b/docs/qa/img34/cmt2tm1/avg_memory.png differ diff --git a/docs/qa/img34/cmt2tm1/avg_mempool_size.png b/docs/qa/img34/cmt2tm1/avg_mempool_size.png new file mode 100644 index 0000000000..b73e577b75 Binary files /dev/null and b/docs/qa/img34/cmt2tm1/avg_mempool_size.png differ diff --git a/docs/qa/img34/cmt2tm1/block_rate_regular.png b/docs/qa/img34/cmt2tm1/block_rate_regular.png new file mode 100644 index 0000000000..5fc7a5560b Binary files /dev/null and b/docs/qa/img34/cmt2tm1/block_rate_regular.png differ diff --git a/docs/qa/img34/cmt2tm1/cpu.png b/docs/qa/img34/cmt2tm1/cpu.png new file mode 100644 index 0000000000..15df58abbe Binary files /dev/null and b/docs/qa/img34/cmt2tm1/cpu.png differ diff --git a/docs/qa/img34/cmt2tm1/memory.png b/docs/qa/img34/cmt2tm1/memory.png new file mode 100644 index 0000000000..b0feab1074 Binary files /dev/null and b/docs/qa/img34/cmt2tm1/memory.png differ diff --git a/docs/qa/img34/cmt2tm1/mempool_size.png b/docs/qa/img34/cmt2tm1/mempool_size.png new file mode 100644 index 0000000000..b3a1514f92 Binary files /dev/null and b/docs/qa/img34/cmt2tm1/mempool_size.png differ diff --git a/docs/qa/img34/cmt2tm1/peers.png b/docs/qa/img34/cmt2tm1/peers.png new file mode 100644 index 0000000000..558d4c129e Binary files /dev/null and b/docs/qa/img34/cmt2tm1/peers.png differ diff --git a/docs/qa/img34/cmt2tm1/rounds.png b/docs/qa/img34/cmt2tm1/rounds.png new file mode 100644 index 0000000000..3c22a5cf30 Binary files /dev/null and b/docs/qa/img34/cmt2tm1/rounds.png differ diff --git a/docs/qa/img34/cmt2tm1/total_txs_rate_regular.png b/docs/qa/img34/cmt2tm1/total_txs_rate_regular.png new file mode 100644 index 0000000000..ae98df2176 Binary files /dev/null and 
b/docs/qa/img34/cmt2tm1/total_txs_rate_regular.png differ diff --git a/docs/qa/img34/homogeneous/all_experiments.png b/docs/qa/img34/homogeneous/all_experiments.png new file mode 100644 index 0000000000..d8768f6a5d Binary files /dev/null and b/docs/qa/img34/homogeneous/all_experiments.png differ diff --git a/docs/qa/img34/homogeneous/avg_cpu.png b/docs/qa/img34/homogeneous/avg_cpu.png new file mode 100644 index 0000000000..7df188951f Binary files /dev/null and b/docs/qa/img34/homogeneous/avg_cpu.png differ diff --git a/docs/qa/img34/homogeneous/avg_memory.png b/docs/qa/img34/homogeneous/avg_memory.png new file mode 100644 index 0000000000..e800cbce22 Binary files /dev/null and b/docs/qa/img34/homogeneous/avg_memory.png differ diff --git a/docs/qa/img34/homogeneous/avg_mempool_size.png b/docs/qa/img34/homogeneous/avg_mempool_size.png new file mode 100644 index 0000000000..beb323e646 Binary files /dev/null and b/docs/qa/img34/homogeneous/avg_mempool_size.png differ diff --git a/docs/qa/img34/homogeneous/block_rate_regular.png b/docs/qa/img34/homogeneous/block_rate_regular.png new file mode 100644 index 0000000000..2a71ab70df Binary files /dev/null and b/docs/qa/img34/homogeneous/block_rate_regular.png differ diff --git a/docs/qa/img34/homogeneous/cpu.png b/docs/qa/img34/homogeneous/cpu.png new file mode 100644 index 0000000000..8e8c9227af Binary files /dev/null and b/docs/qa/img34/homogeneous/cpu.png differ diff --git a/docs/qa/img34/homogeneous/memory.png b/docs/qa/img34/homogeneous/memory.png new file mode 100644 index 0000000000..190c622a34 Binary files /dev/null and b/docs/qa/img34/homogeneous/memory.png differ diff --git a/docs/qa/img34/homogeneous/mempool_size.png b/docs/qa/img34/homogeneous/mempool_size.png new file mode 100644 index 0000000000..ec1c79a242 Binary files /dev/null and b/docs/qa/img34/homogeneous/mempool_size.png differ diff --git a/docs/qa/img34/homogeneous/peers.png b/docs/qa/img34/homogeneous/peers.png new file mode 100644 index 
0000000000..3c8b0a2e0d Binary files /dev/null and b/docs/qa/img34/homogeneous/peers.png differ diff --git a/docs/qa/img34/homogeneous/rounds.png b/docs/qa/img34/homogeneous/rounds.png new file mode 100644 index 0000000000..660f31d939 Binary files /dev/null and b/docs/qa/img34/homogeneous/rounds.png differ diff --git a/docs/qa/img34/homogeneous/total_txs_rate_regular.png b/docs/qa/img34/homogeneous/total_txs_rate_regular.png new file mode 100644 index 0000000000..a9025b6665 Binary files /dev/null and b/docs/qa/img34/homogeneous/total_txs_rate_regular.png differ diff --git a/docs/qa/img34/v034_200node_latencies.png b/docs/qa/img34/v034_200node_latencies.png new file mode 100644 index 0000000000..afd1060caf Binary files /dev/null and b/docs/qa/img34/v034_200node_latencies.png differ diff --git a/docs/qa/img34/v034_200node_latencies_zoomed.png b/docs/qa/img34/v034_200node_latencies_zoomed.png new file mode 100644 index 0000000000..1ff9364422 Binary files /dev/null and b/docs/qa/img34/v034_200node_latencies_zoomed.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/all_experiments.png b/docs/qa/img34/v034_200node_tm2cmt1/all_experiments.png new file mode 100644 index 0000000000..e91a87effd Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/all_experiments.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/avg_cpu.png b/docs/qa/img34/v034_200node_tm2cmt1/avg_cpu.png new file mode 100644 index 0000000000..a1b0ef79e4 Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/avg_cpu.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/avg_memory.png b/docs/qa/img34/v034_200node_tm2cmt1/avg_memory.png new file mode 100644 index 0000000000..f9d9b99334 Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/avg_memory.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png b/docs/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png new file mode 100644 index 0000000000..c2b896060a Binary files /dev/null and 
b/docs/qa/img34/v034_200node_tm2cmt1/avg_mempool_size.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png b/docs/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png new file mode 100644 index 0000000000..5a5417bdf3 Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/block_rate_regular.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png b/docs/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png new file mode 100644 index 0000000000..45de9ce72d Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/c2r200_merged.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/cpu.png b/docs/qa/img34/v034_200node_tm2cmt1/cpu.png new file mode 100644 index 0000000000..eabfa96617 Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/cpu.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/memory.png b/docs/qa/img34/v034_200node_tm2cmt1/memory.png new file mode 100644 index 0000000000..70014c1f96 Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/memory.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/mempool_size.png b/docs/qa/img34/v034_200node_tm2cmt1/mempool_size.png new file mode 100644 index 0000000000..5f4c44b2a6 Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/mempool_size.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/peers.png b/docs/qa/img34/v034_200node_tm2cmt1/peers.png new file mode 100644 index 0000000000..c35c84675c Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/peers.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/rounds.png b/docs/qa/img34/v034_200node_tm2cmt1/rounds.png new file mode 100644 index 0000000000..7d1034bcbc Binary files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/rounds.png differ diff --git a/docs/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png b/docs/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png new file mode 100644 index 0000000000..2e8a40af6a Binary 
files /dev/null and b/docs/qa/img34/v034_200node_tm2cmt1/total_txs_rate_regular.png differ diff --git a/docs/qa/img34/v034_latency_throughput.png b/docs/qa/img34/v034_latency_throughput.png new file mode 100644 index 0000000000..3674fe47b4 Binary files /dev/null and b/docs/qa/img34/v034_latency_throughput.png differ diff --git a/docs/qa/img34/v034_r200c2_heights.png b/docs/qa/img34/v034_r200c2_heights.png new file mode 100644 index 0000000000..11f3bba432 Binary files /dev/null and b/docs/qa/img34/v034_r200c2_heights.png differ diff --git a/docs/qa/img34/v034_r200c2_load-runner.png b/docs/qa/img34/v034_r200c2_load-runner.png new file mode 100644 index 0000000000..70211b0d21 Binary files /dev/null and b/docs/qa/img34/v034_r200c2_load-runner.png differ diff --git a/docs/qa/img34/v034_r200c2_load1.png b/docs/qa/img34/v034_r200c2_load1.png new file mode 100644 index 0000000000..11012844dc Binary files /dev/null and b/docs/qa/img34/v034_r200c2_load1.png differ diff --git a/docs/qa/img34/v034_r200c2_mempool_size.png b/docs/qa/img34/v034_r200c2_mempool_size.png new file mode 100644 index 0000000000..c5d690200a Binary files /dev/null and b/docs/qa/img34/v034_r200c2_mempool_size.png differ diff --git a/docs/qa/img34/v034_r200c2_mempool_size_avg.png b/docs/qa/img34/v034_r200c2_mempool_size_avg.png new file mode 100644 index 0000000000..bda399fe5d Binary files /dev/null and b/docs/qa/img34/v034_r200c2_mempool_size_avg.png differ diff --git a/docs/qa/img34/v034_r200c2_peers.png b/docs/qa/img34/v034_r200c2_peers.png new file mode 100644 index 0000000000..a0aea7ada3 Binary files /dev/null and b/docs/qa/img34/v034_r200c2_peers.png differ diff --git a/docs/qa/img34/v034_r200c2_rounds.png b/docs/qa/img34/v034_r200c2_rounds.png new file mode 100644 index 0000000000..215be100de Binary files /dev/null and b/docs/qa/img34/v034_r200c2_rounds.png differ diff --git a/docs/qa/img34/v034_r200c2_rss.png b/docs/qa/img34/v034_r200c2_rss.png new file mode 100644 index 0000000000..6d14dced0b 
Binary files /dev/null and b/docs/qa/img34/v034_r200c2_rss.png differ diff --git a/docs/qa/img34/v034_r200c2_rss_avg.png b/docs/qa/img34/v034_r200c2_rss_avg.png new file mode 100644 index 0000000000..8dec67da29 Binary files /dev/null and b/docs/qa/img34/v034_r200c2_rss_avg.png differ diff --git a/docs/qa/img34/v034_r200c2_total-txs.png b/docs/qa/img34/v034_r200c2_total-txs.png new file mode 100644 index 0000000000..177d5f1c31 Binary files /dev/null and b/docs/qa/img34/v034_r200c2_total-txs.png differ diff --git a/docs/qa/img34/v034_report_tabbed.txt b/docs/qa/img34/v034_report_tabbed.txt new file mode 100644 index 0000000000..2514954743 --- /dev/null +++ b/docs/qa/img34/v034_report_tabbed.txt @@ -0,0 +1,52 @@ +Experiment ID: 3d5cf4ef-1a1a-4b46-aa2d-da5643d2e81e │Experiment ID: 80e472ec-13a1-4772-a827-3b0c907fb51d │Experiment ID: 07aca6cf-c5a4-4696-988f-e3270fc6333b + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 25 │ Rate: 25 │ Rate: 25 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 2225 │ Total Valid Tx: 4450 │ Total Valid Tx: 8900 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 599.404362ms │ Minimum Latency: 448.145181ms │ Minimum Latency: 412.485729ms + Maximum Latency: 3.539686885s │ Maximum Latency: 3.237392049s │ Maximum Latency: 12.026665368s + Average Latency: 1.441485349s │ Average Latency: 1.441267946s │ Average Latency: 2.150192457s + Standard Deviation: 541.049869ms │ Standard Deviation: 525.040007ms │ Standard Deviation: 2.233852478s + │ │ +Experiment ID: 953dc544-dd40-40e8-8712-20c34c3ce45e │Experiment ID: d31fc258-16e7-45cd-9dc8-13ab87bc0b0a │Experiment ID: 15d90a7e-b941-42f4-b411-2f15f857739e + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 50 │ Rate: 50 │ Rate: 50 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 4450 │ Total Valid Tx: 8900 │ Total Valid Tx: 17800 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ 
Total Negative Latencies: 0 + Minimum Latency: 482.046942ms │ Minimum Latency: 435.458913ms │ Minimum Latency: 510.746448ms + Maximum Latency: 3.761483455s │ Maximum Latency: 7.175583584s │ Maximum Latency: 6.551497882s + Average Latency: 1.450408183s │ Average Latency: 1.681673116s │ Average Latency: 1.738083875s + Standard Deviation: 587.560056ms │ Standard Deviation: 1.147902047s │ Standard Deviation: 943.46522ms + │ │ +Experiment ID: 9a0b9980-9ce6-4db5-a80a-65ca70294b87 │Experiment ID: df8fa4f4-80af-4ded-8a28-356d15018b43 │Experiment ID: d0e41c2c-89c0-4f38-8e34-ca07adae593a + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 100 │ Rate: 100 │ Rate: 100 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 8900 │ Total Valid Tx: 17800 │ Total Valid Tx: 35600 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 477.417219ms │ Minimum Latency: 564.29247ms │ Minimum Latency: 840.71089ms + Maximum Latency: 6.63744785s │ Maximum Latency: 6.988553219s │ Maximum Latency: 9.555312398s + Average Latency: 1.561216103s │ Average Latency: 1.76419063s │ Average Latency: 3.200941683s + Standard Deviation: 1.011333552s │ Standard Deviation: 1.068459423s │ Standard Deviation: 1.732346601s + │ │ +Experiment ID: 493df3ee-4a36-4bce-80f8-6d65da66beda │Experiment ID: 13060525-f04f-46f6-8ade-286684b2fe50 │Experiment ID: 1777cbd2-8c96-42e4-9ec7-9b21f2225e4d + │ │ + Connections: 1 │ Connections: 2 │ Connections: 4 + Rate: 200 │ Rate: 200 │ Rate: 200 + Size: 1024 │ Size: 1024 │ Size: 1024 + │ │ + Total Valid Tx: 17800 │ Total Valid Tx: 35600 │ Total Valid Tx: 38660 + Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0 + Minimum Latency: 493.705261ms │ Minimum Latency: 955.090573ms │ Minimum Latency: 1.9485821s + Maximum Latency: 7.440921872s │ Maximum Latency: 10.086673491s │ Maximum Latency: 17.73103976s + Average Latency: 1.875510582s │ Average Latency: 3.438130099s │ 
Average Latency: 8.143862237s + Standard Deviation: 1.304336995s │ Standard Deviation: 1.966391574s │ Standard Deviation: 3.943140002s + diff --git a/docs/qa/img34/v034_rotating_heights.png b/docs/qa/img34/v034_rotating_heights.png new file mode 100644 index 0000000000..47913c282f Binary files /dev/null and b/docs/qa/img34/v034_rotating_heights.png differ diff --git a/docs/qa/img34/v034_rotating_heights_ephe.png b/docs/qa/img34/v034_rotating_heights_ephe.png new file mode 100644 index 0000000000..981b93d6c4 Binary files /dev/null and b/docs/qa/img34/v034_rotating_heights_ephe.png differ diff --git a/docs/qa/img34/v034_rotating_latencies.png b/docs/qa/img34/v034_rotating_latencies.png new file mode 100644 index 0000000000..f0a54ed5b6 Binary files /dev/null and b/docs/qa/img34/v034_rotating_latencies.png differ diff --git a/docs/qa/img34/v034_rotating_latencies_uniq.png b/docs/qa/img34/v034_rotating_latencies_uniq.png new file mode 100644 index 0000000000..e5d694a16e Binary files /dev/null and b/docs/qa/img34/v034_rotating_latencies_uniq.png differ diff --git a/docs/qa/img34/v034_rotating_load1.png b/docs/qa/img34/v034_rotating_load1.png new file mode 100644 index 0000000000..e9c385b85e Binary files /dev/null and b/docs/qa/img34/v034_rotating_load1.png differ diff --git a/docs/qa/img34/v034_rotating_peers.png b/docs/qa/img34/v034_rotating_peers.png new file mode 100644 index 0000000000..ab5c8732d3 Binary files /dev/null and b/docs/qa/img34/v034_rotating_peers.png differ diff --git a/docs/qa/img34/v034_rotating_rss_avg.png b/docs/qa/img34/v034_rotating_rss_avg.png new file mode 100644 index 0000000000..9a4167320c Binary files /dev/null and b/docs/qa/img34/v034_rotating_rss_avg.png differ diff --git a/docs/qa/img34/v034_rotating_total-txs.png b/docs/qa/img34/v034_rotating_total-txs.png new file mode 100644 index 0000000000..1ce5f47e9b Binary files /dev/null and b/docs/qa/img34/v034_rotating_total-txs.png differ diff --git a/docs/qa/method.md b/docs/qa/method.md index 
5326f935a1..6de0cbcf80 100644 --- a/docs/qa/method.md +++ b/docs/qa/method.md @@ -1,6 +1,8 @@ --- order: 1 -title: Method +parent: + title: Method + order: 1 --- # Method @@ -106,11 +108,11 @@ The CometBFT team should improve it at every iteration to increase the amount of 3. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate * If you are looking for the saturation point * Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`, - copy its related lines to the filename that matches the number of connections, for example + copy its related lines to the filename that matches the number of connections, for example ```bash for cnum in 1 2 3 4; do echo "$cnum"; grep "Connections: $cnum" results/report.txt -B 2 -A 10 > results/report$cnum.txt; done ``` - + * Sort the experiments in `report01.txt` in ascending tx rate order. Likewise for `report02.txt` and `report04.txt`. * Otherwise just keep `report.txt`, and skip step 4. 4. Generate file `report_tabbed.txt` by showing the contents `report01.txt`, `report02.txt`, `report04.txt` side by side @@ -229,7 +231,7 @@ This section explains how the tests were carried out for reproducibility purpose 7. On a different shell, * run `make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y` * `X` and `Y` should reflect a load below the saturation point (see, e.g., - [this paragraph](./v034/README.md#finding-the-saturation-point) for further info) + [this paragraph](CometBFT-QA-34.md#finding-the-saturation-point) for further info) 8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up. * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for full length of the experiment. 
diff --git a/docs/tutorials/java.md b/docs/tutorials/java.md deleted file mode 100644 index 526d3a2c68..0000000000 --- a/docs/tutorials/java.md +++ /dev/null @@ -1,630 +0,0 @@ - - -# Creating an application in Java - -## Guide Assumptions - -This guide is designed for beginners who want to get started with a Tendermint -Core application from scratch. It does not assume that you have any prior -experience with Tendermint Core. - -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state -transition machine (your application) - written in any programming language - and securely -replicates it on many machines. - -By following along with this guide, you'll create a Tendermint Core project -called kvstore, a (very) simple distributed BFT key-value store. The application (which should -implementing the blockchain interface (ABCI)) will be written in Java. - -This guide assumes that you are not new to JVM world. If you are new please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html). - -## Built-in app vs external app - -If you use Golang, you can run your app and Tendermint Core in the same process to get maximum performance. -[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way. -Please refer to [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details. - -If you choose another language, like we did in this guide, you have to write a separate app, -which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC. -This guide will show you how to build external application using RPC server. - -Having a separate application might give you better security guarantees as two -processes would be communicating via established binary protocol. 
Tendermint -Core will not have access to application's state. - -## 1.1 Installing Java and Gradle - -Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html). - -Verify that you have installed Java successfully: - -```bash -$ java -version -java version "12.0.2" 2019-07-16 -Java(TM) SE Runtime Environment (build 12.0.2+10) -Java HotSpot(TM) 64-Bit Server VM (build 12.0.2+10, mixed mode, sharing) -``` - -You can choose any version of Java higher or equal to 8. -This guide is written using Java SE Development Kit 12. - -Make sure you have `$JAVA_HOME` environment variable set: - -```bash -$ echo $JAVA_HOME -/Library/Java/JavaVirtualMachines/jdk-12.0.2.jdk/Contents/Home -``` - -For Gradle installation, please refer to [their official guide](https://gradle.org/install/). - -## 1.2 Creating a new Java project - -We'll start by creating a new Gradle project. - -```bash -export KVSTORE_HOME=~/kvstore -mkdir $KVSTORE_HOME -cd $KVSTORE_HOME -``` - -Inside the example directory run: - -```bash -gradle init --dsl groovy --package io.example --project-name example --type java-application --test-framework junit -``` - -This will create a new project for you. The tree of files should look like: - -```bash -$ tree -. -|-- build.gradle -|-- gradle -| `-- wrapper -| |-- gradle-wrapper.jar -| `-- gradle-wrapper.properties -|-- gradlew -|-- gradlew.bat -|-- settings.gradle -`-- src - |-- main - | |-- java - | | `-- io - | | `-- example - | | `-- App.java - | `-- resources - `-- test - |-- java - | `-- io - | `-- example - | `-- AppTest.java - `-- resources -``` - -When run, this should print "Hello world." to the standard output. - -```bash -$ ./gradlew run -> Task :run -Hello world. -``` - -## 1.3 Writing a Tendermint Core application - -Tendermint Core communicates with the application through the Application -BlockChain Interface (ABCI). 
All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/abci/types.proto). -This allows Tendermint Core to run applications written in any programming -language. - -### 1.3.1 Compile .proto files - -Add the following piece to the top of the `build.gradle`: - -```groovy -buildscript { - repositories { - mavenCentral() - } - dependencies { - classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8' - } -} -``` - -Enable the protobuf plugin in the `plugins` section of the `build.gradle`: - -```groovy -plugins { - id 'com.google.protobuf' version '0.8.8' -} -``` - -Add the following code to `build.gradle`: - -```groovy -protobuf { - protoc { - artifact = "com.google.protobuf:protoc:3.7.1" - } - plugins { - grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1' - } - } - generateProtoTasks { - all()*.plugins { - grpc {} - } - } -} -``` - -Now we should be ready to compile the `*.proto` files. - -Copy the necessary `.proto` files to your project: - -```bash -mkdir -p \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs \ - $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto - -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/version/version.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version/version.proto -cp 
$GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/params.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/params.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/merkle.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/merkle.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto -cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \ - $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto -``` - -Add these dependencies to `build.gradle`: - -```groovy -dependencies { - implementation 'io.grpc:grpc-protobuf:1.22.1' - implementation 'io.grpc:grpc-netty-shaded:1.22.1' - implementation 'io.grpc:grpc-stub:1.22.1' -} -``` - -To generate all protobuf-type classes run: - -```bash -./gradlew generateProto -``` - -To verify that everything went smoothly, you can inspect the `build/generated/` directory: - -```bash -$ tree build/generated/ -build/generated/ -|-- source -| `-- proto -| `-- main -| |-- grpc -| | `-- types -| | `-- ABCIApplicationGrpc.java -| `-- java -| |-- com -| | `-- google -| | `-- protobuf -| | `-- GoGoProtos.java -| |-- common -| | `-- Types.java -| |-- merkle -| | `-- Merkle.java -| `-- types -| `-- 
Types.java -``` - -### 1.3.2 Implementing ABCI - -The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file -contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement. - -Create `$KVSTORE_HOME/src/main/java/io/example/KVStoreApp.java` file with the following content: - -```java -package io.example; - -import io.grpc.stub.StreamObserver; -import types.ABCIApplicationGrpc; -import types.Types.*; - -class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase { - - // methods implementation - -} -``` - -Now I will go through each method of `ABCIApplicationImplBase` explaining when it's called and adding -required business logic. - -### 1.3.3 CheckTx - -When a new transaction is added to the Tendermint Core, it will ask the -application to check it (validate the format, signatures, etc.). - -```java -@Override -public void checkTx(RequestCheckTx req, StreamObserver responseObserver) { - var tx = req.getTx(); - int code = validate(tx); - var resp = ResponseCheckTx.newBuilder() - .setCode(code) - .setGasWanted(1) - .build(); - responseObserver.onNext(resp); - responseObserver.onCompleted(); -} - -private int validate(ByteString tx) { - List parts = split(tx, '='); - if (parts.size() != 2) { - return 1; - } - byte[] key = parts.get(0); - byte[] value = parts.get(1); - - // check if the same key=value already exists - var stored = getPersistedValue(key); - if (stored != null && Arrays.equals(stored, value)) { - return 2; - } - - return 0; -} - -private List split(ByteString tx, char separator) { - var arr = tx.toByteArray(); - int i; - for (i = 0; i < tx.size(); i++) { - if (arr[i] == (byte)separator) { - break; - } - } - if (i == tx.size()) { - return Collections.emptyList(); - } - return List.of( - tx.substring(0, i).toByteArray(), - tx.substring(i + 1).toByteArray() - ); -} -``` - -Don't worry if this does not compile yet. 
- -If the transaction does not have a form of `{bytes}={bytes}`, we return `1` -code. When the same key=value already exist (same key and value), we return `2` -code. For others, we return a zero code indicating that they are valid. - -Note that anything with non-zero code will be considered invalid (`-1`, `100`, -etc.) by Tendermint Core. - -Valid transactions will eventually be committed given they are not too big and -have enough gas. To learn more about gas, check out ["the -specification"](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#gas). - -For the underlying key-value store we'll use -[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java. - -`build.gradle`: - -```groovy -dependencies { - implementation 'org.jetbrains.xodus:xodus-environment:1.3.91' -} -``` - -```java -... -import jetbrains.exodus.ArrayByteIterable; -import jetbrains.exodus.ByteIterable; -import jetbrains.exodus.env.Environment; -import jetbrains.exodus.env.Store; -import jetbrains.exodus.env.StoreConfig; -import jetbrains.exodus.env.Transaction; - -class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase { - private Environment env; - private Transaction txn = null; - private Store store = null; - - KVStoreApp(Environment env) { - this.env = env; - } - - ... - - private byte[] getPersistedValue(byte[] k) { - return env.computeInReadonlyTransaction(txn -> { - var store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn); - ByteIterable byteIterable = store.get(txn, new ArrayByteIterable(k)); - if (byteIterable == null) { - return null; - } - return byteIterable.getBytesUnsafe(); - }); - } -} -``` - -### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit - -When Tendermint Core has decided on the block, it's transferred to the -application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and -`EndBlock` in the end. 
`DeliverTx` are being transferred asynchronously, but the -responses are expected to come in order. - -```java -@Override -public void beginBlock(RequestBeginBlock req, StreamObserver responseObserver) { - txn = env.beginTransaction(); - store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn); - var resp = ResponseBeginBlock.newBuilder().build(); - responseObserver.onNext(resp); - responseObserver.onCompleted(); -} -``` - -Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store. - -```java -@Override -public void deliverTx(RequestDeliverTx req, StreamObserver responseObserver) { - var tx = req.getTx(); - int code = validate(tx); - if (code == 0) { - List parts = split(tx, '='); - var key = new ArrayByteIterable(parts.get(0)); - var value = new ArrayByteIterable(parts.get(1)); - store.put(txn, key, value); - } - var resp = ResponseDeliverTx.newBuilder() - .setCode(code) - .build(); - responseObserver.onNext(resp); - responseObserver.onCompleted(); -} -``` - -If the transaction is badly formatted or the same key=value already exist, we -again return the non-zero code. Otherwise, we add it to the store. - -In the current design, a block can include incorrect transactions (those who -passed `CheckTx`, but failed `DeliverTx` or transactions included by the proposer -directly). This is done for performance reasons. - -Note we can't commit transactions inside the `DeliverTx` because in such case -`Query`, which may be called in parallel, will return inconsistent data (i.e. -it will report that some value already exist even when the actual block was not -yet committed). - -`Commit` instructs the application to persist the new state. 
- -```java -@Override -public void commit(RequestCommit req, StreamObserver responseObserver) { - txn.commit(); - var resp = ResponseCommit.newBuilder() - .setData(ByteString.copyFrom(new byte[8])) - .build(); - responseObserver.onNext(resp); - responseObserver.onCompleted(); -} -``` - -### 1.3.5 Query - -Now, when the client wants to know whenever a particular key/value exist, it -will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call -the application's `Query` method. - -Applications are free to provide their own APIs. But by using Tendermint Core -as a proxy, clients (including [light client -package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage -the unified API across different applications. Plus they won't have to call the -otherwise separate Tendermint Core API for additional proofs. - -Note we don't include a proof here. - -```java -@Override -public void query(RequestQuery req, StreamObserver responseObserver) { - var k = req.getData().toByteArray(); - var v = getPersistedValue(k); - var builder = ResponseQuery.newBuilder(); - if (v == null) { - builder.setLog("does not exist"); - } else { - builder.setLog("exists"); - builder.setKey(ByteString.copyFrom(k)); - builder.setValue(ByteString.copyFrom(v)); - } - responseObserver.onNext(builder.build()); - responseObserver.onCompleted(); -} -``` - -The complete specification can be found -[here](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/abci/). 
- -## 1.4 Starting an application and a Tendermint Core instances - -Put the following code into the `$KVSTORE_HOME/src/main/java/io/example/App.java` file: - -```java -package io.example; - -import jetbrains.exodus.env.Environment; -import jetbrains.exodus.env.Environments; - -import java.io.IOException; - -public class App { - public static void main(String[] args) throws IOException, InterruptedException { - try (Environment env = Environments.newInstance("tmp/storage")) { - var app = new KVStoreApp(env); - var server = new GrpcServer(app, 26658); - server.start(); - server.blockUntilShutdown(); - } - } -} -``` - -It is the entry point of the application. -Here we create a special object `Environment`, which knows where to store the application state. -Then we create and start the gRPC server to handle Tendermint Core requests. - -Create the `$KVSTORE_HOME/src/main/java/io/example/GrpcServer.java` file with the following content: - -```java -package io.example; - -import io.grpc.BindableService; -import io.grpc.Server; -import io.grpc.ServerBuilder; - -import java.io.IOException; - -class GrpcServer { - private Server server; - - GrpcServer(BindableService service, int port) { - this.server = ServerBuilder.forPort(port) - .addService(service) - .build(); - } - - void start() throws IOException { - server.start(); - System.out.println("gRPC server started, listening on $port"); - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - System.out.println("shutting down gRPC server since JVM is shutting down"); - GrpcServer.this.stop(); - System.out.println("server shut down"); - })); - } - - private void stop() { - server.shutdown(); - } - - /** - * Await termination on the main thread since the grpc library uses daemon threads. 
- */ - void blockUntilShutdown() throws InterruptedException { - server.awaitTermination(); - } -} -``` - -## 1.5 Getting Up and Running - -To create a default configuration, nodeKey and private validator files, let's -execute `tendermint init`. But before we do that, we will need to install -Tendermint Core. - -```bash -$ rm -rf /tmp/example -$ cd $GOPATH/src/github.com/tendermint/tendermint -$ make install -$ TMHOME="/tmp/example" tendermint init - -I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json -I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json -I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json -``` - -Feel free to explore the generated files, which can be found at -`/tmp/example/config` directory. Documentation on the config can be found -[here](https://docs.tendermint.com/v0.34/tendermint-core/configuration.html). - -We are ready to start our application: - -```bash -./gradlew run - -gRPC server started, listening on 26658 -``` - -Then we need to start Tendermint Core and point it to our application. 
Staying -within the application directory execute: - -```bash -$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658 - -I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7 -I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node -I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}" -I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0 -I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000 -``` - -Now open another tab in your terminal and try sending a transaction: - -```bash -$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' -{ - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "gasWanted": "1" - }, - "deliver_tx": {}, - "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", - "height": "33" -} -``` - -Response should contain the height where this transaction was committed. - -Now let's check if the given key now exists and its value: - -```bash -$ curl -s 'localhost:26657/abci_query?data="tendermint"' -{ - "jsonrpc": "2.0", - "id": "", - "result": { - "response": { - "log": "exists", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3My" - } - } -} -``` - -`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of `tendermint` and `rocks` accordingly. - -## Outro - -I hope everything went smoothly and your first, but hopefully not the last, -Tendermint Core application is up and running. If not, please [open an issue on -Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig -deeper, read [the docs](https://docs.tendermint.com/v0.34/). 
- -The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-java). diff --git a/docs/tutorials/kotlin.md b/docs/tutorials/kotlin.md deleted file mode 100644 index c311e9d71e..0000000000 --- a/docs/tutorials/kotlin.md +++ /dev/null @@ -1,604 +0,0 @@ - - -# Creating an application in Kotlin - -## Guide Assumptions - -This guide is designed for beginners who want to get started with a Tendermint -Core application from scratch. It does not assume that you have any prior -experience with Tendermint Core. - -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state -transition machine (your application) - written in any programming language - and securely -replicates it on many machines. - -By following along with this guide, you'll create a Tendermint Core project -called kvstore, a (very) simple distributed BFT key-value store. The application (which should -implementing the blockchain interface (ABCI)) will be written in Kotlin. - -This guide assumes that you are not new to JVM world. If you are new please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html). - -## Built-in app vs external app - -If you use Golang, you can run your app and Tendermint Core in the same process to get maximum performance. -[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way. -Please refer to [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details. - -If you choose another language, like we did in this guide, you have to write a separate app, -which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC. -This guide will show you how to build external application using RPC server. 
- -Having a separate application might give you better security guarantees as two -processes would be communicating via established binary protocol. Tendermint -Core will not have access to application's state. - -## 1.1 Installing Java and Gradle - -Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html). - -Verify that you have installed Java successfully: - -```bash -java -version -java version "1.8.0_162" -Java(TM) SE Runtime Environment (build 1.8.0_162-b12) -Java HotSpot(TM) 64-Bit Server VM (build 25.162-b12, mixed mode) -``` - -You can choose any version of Java higher or equal to 8. -In my case it is Java SE Development Kit 8. - -Make sure you have `$JAVA_HOME` environment variable set: - -```bash -echo $JAVA_HOME -/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home -``` - -For Gradle installation, please refer to [their official guide](https://gradle.org/install/). - -## 1.2 Creating a new Kotlin project - -We'll start by creating a new Gradle project. - -```bash -export KVSTORE_HOME=~/kvstore -mkdir $KVSTORE_HOME -cd $KVSTORE_HOME -``` - -Inside the example directory run: - -```bash -gradle init --dsl groovy --package io.example --project-name example --type kotlin-application -``` - -This will create a new project for you. The tree of files should look like: - -```bash -tree -. -|-- build.gradle -|-- gradle -| `-- wrapper -| |-- gradle-wrapper.jar -| `-- gradle-wrapper.properties -|-- gradlew -|-- gradlew.bat -|-- settings.gradle -`-- src - |-- main - | |-- kotlin - | | `-- io - | | `-- example - | | `-- App.kt - | `-- resources - `-- test - |-- kotlin - | `-- io - | `-- example - | `-- AppTest.kt - `-- resources -``` - -When run, this should print "Hello world." to the standard output. - -```bash -./gradlew run -> Task :run -Hello world. 
-``` - -## 1.3 Writing a Tendermint Core application - -Tendermint Core communicates with the application through the Application -BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/abci/types.proto). -This allows Tendermint Core to run applications written in any programming -language. - -### 1.3.1 Compile .proto files - -Add the following piece to the top of the `build.gradle`: - -```groovy -buildscript { - repositories { - mavenCentral() - } - dependencies { - classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8' - } -} -``` - -Enable the protobuf plugin in the `plugins` section of the `build.gradle`: - -```groovy -plugins { - id 'com.google.protobuf' version '0.8.8' -} -``` - -Add the following code to `build.gradle`: - -```groovy -protobuf { - protoc { - artifact = "com.google.protobuf:protoc:3.7.1" - } - plugins { - grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1' - } - } - generateProtoTasks { - all()*.plugins { - grpc {} - } - } -} -``` - -Now we should be ready to compile the `*.proto` files. 
- -Copy the necessary `.proto` files to your project: - -```bash -mkdir -p \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs \ - $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto - -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/version/version.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version/version.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/params.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/params.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/merkle.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/merkle.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto \ - 
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto -cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \ - $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto -``` - -Add these dependencies to `build.gradle`: - -```groovy -dependencies { - implementation 'io.grpc:grpc-protobuf:1.22.1' - implementation 'io.grpc:grpc-netty-shaded:1.22.1' - implementation 'io.grpc:grpc-stub:1.22.1' -} -``` - -To generate all protobuf-type classes run: - -```bash -./gradlew generateProto -``` - -To verify that everything went smoothly, you can inspect the `build/generated/` directory: - -```bash -tree build/generated/ -build/generated/ -`-- source - `-- proto - `-- main - |-- grpc - | `-- types - | `-- ABCIApplicationGrpc.java - `-- java - |-- com - | `-- google - | `-- protobuf - | `-- GoGoProtos.java - |-- common - | `-- Types.java - |-- merkle - | `-- Merkle.java - `-- types - `-- Types.java -``` - -### 1.3.2 Implementing ABCI - -The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file -contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement. - -Create `$KVSTORE_HOME/src/main/kotlin/io/example/KVStoreApp.kt` file with the following content: - -```kotlin -package io.example - -import io.grpc.stub.StreamObserver -import types.ABCIApplicationGrpc -import types.Types.* - -class KVStoreApp : ABCIApplicationGrpc.ABCIApplicationImplBase() { - - // methods implementation - -} -``` - -Now I will go through each method of `ABCIApplicationImplBase` explaining when it's called and adding -required business logic. - -### 1.3.3 CheckTx - -When a new transaction is added to the Tendermint Core, it will ask the -application to check it (validate the format, signatures, etc.). 
- -```kotlin -override fun checkTx(req: RequestCheckTx, responseObserver: StreamObserver) { - val code = req.tx.validate() - val resp = ResponseCheckTx.newBuilder() - .setCode(code) - .setGasWanted(1) - .build() - responseObserver.onNext(resp) - responseObserver.onCompleted() -} - -private fun ByteString.validate(): Int { - val parts = this.split('=') - if (parts.size != 2) { - return 1 - } - val key = parts[0] - val value = parts[1] - - // check if the same key=value already exists - val stored = getPersistedValue(key) - if (stored != null && stored.contentEquals(value)) { - return 2 - } - - return 0 -} - -private fun ByteString.split(separator: Char): List { - val arr = this.toByteArray() - val i = (0 until this.size()).firstOrNull { arr[it] == separator.toByte() } - ?: return emptyList() - return listOf( - this.substring(0, i).toByteArray(), - this.substring(i + 1).toByteArray() - ) -} -``` - -Don't worry if this does not compile yet. - -If the transaction does not have a form of `{bytes}={bytes}`, we return `1` -code. When the same key=value already exist (same key and value), we return `2` -code. For others, we return a zero code indicating that they are valid. - -Note that anything with non-zero code will be considered invalid (`-1`, `100`, -etc.) by Tendermint Core. - -Valid transactions will eventually be committed given they are not too big and -have enough gas. To learn more about gas, check out ["the -specification"](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#gas). - -For the underlying key-value store we'll use -[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java. - -`build.gradle`: - -```groovy -dependencies { - implementation 'org.jetbrains.xodus:xodus-environment:1.3.91' -} -``` - -```kotlin -... 
-import jetbrains.exodus.ArrayByteIterable -import jetbrains.exodus.env.Environment -import jetbrains.exodus.env.Store -import jetbrains.exodus.env.StoreConfig -import jetbrains.exodus.env.Transaction - -class KVStoreApp( - private val env: Environment -) : ABCIApplicationGrpc.ABCIApplicationImplBase() { - - private var txn: Transaction? = null - private var store: Store? = null - - ... - - private fun getPersistedValue(k: ByteArray): ByteArray? { - return env.computeInReadonlyTransaction { txn -> - val store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn) - store.get(txn, ArrayByteIterable(k))?.bytesUnsafe - } - } -} -``` - -### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit - -When Tendermint Core has decided on the block, it's transferred to the -application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and -`EndBlock` in the end. `DeliverTx` are being transferred asynchronously, but the -responses are expected to come in order. - -```kotlin -override fun beginBlock(req: RequestBeginBlock, responseObserver: StreamObserver) { - txn = env.beginTransaction() - store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn!!) - val resp = ResponseBeginBlock.newBuilder().build() - responseObserver.onNext(resp) - responseObserver.onCompleted() -} -``` - -Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store. - -```kotlin -override fun deliverTx(req: RequestDeliverTx, responseObserver: StreamObserver) { - val code = req.tx.validate() - if (code == 0) { - val parts = req.tx.split('=') - val key = ArrayByteIterable(parts[0]) - val value = ArrayByteIterable(parts[1]) - store!!.put(txn!!, key, value) - } - val resp = ResponseDeliverTx.newBuilder() - .setCode(code) - .build() - responseObserver.onNext(resp) - responseObserver.onCompleted() -} -``` - -If the transaction is badly formatted or the same key=value already exist, we -again return the non-zero code. 
Otherwise, we add it to the store. - -In the current design, a block can include incorrect transactions (those who -passed `CheckTx`, but failed `DeliverTx` or transactions included by the proposer -directly). This is done for performance reasons. - -Note we can't commit transactions inside the `DeliverTx` because in such case -`Query`, which may be called in parallel, will return inconsistent data (i.e. -it will report that some value already exist even when the actual block was not -yet committed). - -`Commit` instructs the application to persist the new state. - -```kotlin -override fun commit(req: RequestCommit, responseObserver: StreamObserver) { - txn!!.commit() - val resp = ResponseCommit.newBuilder() - .setData(ByteString.copyFrom(ByteArray(8))) - .build() - responseObserver.onNext(resp) - responseObserver.onCompleted() -} -``` - -### 1.3.5 Query - -Now, when the client wants to know whenever a particular key/value exist, it -will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call -the application's `Query` method. - -Applications are free to provide their own APIs. But by using Tendermint Core -as a proxy, clients (including [light client -package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage -the unified API across different applications. Plus they won't have to call the -otherwise separate Tendermint Core API for additional proofs. - -Note we don't include a proof here. 
- -```kotlin -override fun query(req: RequestQuery, responseObserver: StreamObserver) { - val k = req.data.toByteArray() - val v = getPersistedValue(k) - val builder = ResponseQuery.newBuilder() - if (v == null) { - builder.log = "does not exist" - } else { - builder.log = "exists" - builder.key = ByteString.copyFrom(k) - builder.value = ByteString.copyFrom(v) - } - responseObserver.onNext(builder.build()) - responseObserver.onCompleted() -} -``` - -The complete specification can be found -[here](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/abci/). - -## 1.4 Starting an application and a Tendermint Core instances - -Put the following code into the `$KVSTORE_HOME/src/main/kotlin/io/example/App.kt` file: - -```kotlin -package io.example - -import jetbrains.exodus.env.Environments - -fun main() { - Environments.newInstance("tmp/storage").use { env -> - val app = KVStoreApp(env) - val server = GrpcServer(app, 26658) - server.start() - server.blockUntilShutdown() - } -} -``` - -It is the entry point of the application. -Here we create a special object `Environment`, which knows where to store the application state. -Then we create and start the gRPC server to handle Tendermint Core requests. 
- -Create `$KVSTORE_HOME/src/main/kotlin/io/example/GrpcServer.kt` file with the following content: - -```kotlin -package io.example - -import io.grpc.BindableService -import io.grpc.ServerBuilder - -class GrpcServer( - private val service: BindableService, - private val port: Int -) { - private val server = ServerBuilder - .forPort(port) - .addService(service) - .build() - - fun start() { - server.start() - println("gRPC server started, listening on $port") - Runtime.getRuntime().addShutdownHook(object : Thread() { - override fun run() { - println("shutting down gRPC server since JVM is shutting down") - this@GrpcServer.stop() - println("server shut down") - } - }) - } - - fun stop() { - server.shutdown() - } - - /** - * Await termination on the main thread since the grpc library uses daemon threads. - */ - fun blockUntilShutdown() { - server.awaitTermination() - } - -} -``` - -## 1.5 Getting Up and Running - -To create a default configuration, nodeKey and private validator files, let's -execute `tendermint init`. But before we do that, we will need to install -Tendermint Core. - -```bash -rm -rf /tmp/example -cd $GOPATH/src/github.com/tendermint/tendermint -make install -TMHOME="/tmp/example" tendermint init - -I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json -I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json -I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json -``` - -Feel free to explore the generated files, which can be found at -`/tmp/example/config` directory. Documentation on the config can be found -[here](https://docs.tendermint.com/v0.34/tendermint-core/configuration.html). 
- -We are ready to start our application: - -```bash -./gradlew run - -gRPC server started, listening on 26658 -``` - -Then we need to start Tendermint Core and point it to our application. Staying -within the application directory execute: - -```bash -TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658 - -I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7 -I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node -I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}" -I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0 -I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000 -``` - -Now open another tab in your terminal and try sending a transaction: - -```bash -curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' -{ - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "gasWanted": "1" - }, - "deliver_tx": {}, - "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", - "height": "33" -} -``` - -Response should contain the height where this transaction was committed. - -Now let's check if the given key now exists and its value: - -```bash -curl -s 'localhost:26657/abci_query?data="tendermint"' -{ - "jsonrpc": "2.0", - "id": "", - "result": { - "response": { - "log": "exists", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3My" - } - } -} -``` - -`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of `tendermint` and `rocks` accordingly. - -## Outro - -I hope everything went smoothly and your first, but hopefully not the last, -Tendermint Core application is up and running. 
If not, please [open an issue on -Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig -deeper, read [the docs](https://docs.tendermint.com/v0.34/). - -The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-kotlin). diff --git a/go.mod b/go.mod index 13d991b8fc..1655a96ceb 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( github.com/BurntSushi/toml v1.2.1 - github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d + github.com/ChainSafe/go-schnorrkel v1.0.0 github.com/Workiva/go-datastructures v1.0.53 github.com/adlio/schema v1.3.3 github.com/fortytw2/leaktest v1.3.0 diff --git a/go.sum b/go.sum index aca8b88401..6244430c7e 100644 --- a/go.sum +++ b/go.sum @@ -53,8 +53,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= diff --git a/light/provider/http/http.go b/light/provider/http/http.go index 
f529cae484..430dddd54a 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -180,6 +180,15 @@ func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHe commit, err := p.client.Commit(ctx, height) switch { case err == nil: + // See https://github.com/cometbft/cometbft/issues/575 + // If the node is starting at a non-zero height, but does not yet + // have any blocks, it can return an empty signed header without + // returning an error. + if commit.SignedHeader.IsEmpty() { + // Technically this means that the provider still needs to + // catch up. + return nil, provider.ErrHeightTooHigh + } return &commit.SignedHeader, nil case regexpTooHigh.MatchString(err.Error()): diff --git a/light/proxy/routes.go b/light/proxy/routes.go index a97e079105..2898853d26 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -109,14 +109,6 @@ func makeBlockFunc(c *lrpc.Client) rpcBlockFunc { } } -type rpcSignedBlockFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultSignedBlock, error) - -func makeSignedBlockFunc(c *lrpc.Client) rpcSignedBlockFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultSignedBlock, error) { - return c.SignedBlock(ctx.Context(), height) - } -} - type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc { @@ -141,40 +133,6 @@ func makeCommitFunc(c *lrpc.Client) rpcCommitFunc { } } -type rpcDataCommitmentFunc func( - ctx *rpctypes.Context, - start uint64, - end uint64, -) (*ctypes.ResultDataCommitment, error) - -type rpcDataRootInclusionProofFunc func( - ctx *rpctypes.Context, - height uint64, - start uint64, - end uint64, -) (*ctypes.ResultDataRootInclusionProof, error) - -func makeDataCommitmentFunc(c *lrpc.Client) rpcDataCommitmentFunc { - return func( - ctx *rpctypes.Context, - start uint64, - end uint64, - ) (*ctypes.ResultDataCommitment, error) { - return 
c.DataCommitment(ctx.Context(), start, end) - } -} - -func makeDataRootInclusionProofFunc(c *lrpc.Client) rpcDataRootInclusionProofFunc { - return func( - ctx *rpctypes.Context, - height uint64, - start uint64, - end uint64, - ) (*ctypes.ResultDataRootInclusionProof, error) { - return c.DataRootInclusionProof(ctx.Context(), height, start, end) - } -} - type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) func makeTxFunc(c *lrpc.Client) rpcTxFunc { diff --git a/mempool/cat/cache.go b/mempool/cat/cache.go index 803ed31bc7..7afb77f602 100644 --- a/mempool/cat/cache.go +++ b/mempool/cat/cache.go @@ -122,7 +122,7 @@ func (s *SeenTxSet) Add(txKey types.TxKey, peer uint16) { seenSet, exists := s.set[txKey] if !exists { s.set[txKey] = timestampedPeerSet{ - peers: map[uint16]struct{}{peer: struct{}{}}, + peers: map[uint16]struct{}{peer: {}}, time: time.Now().UTC(), } } else { diff --git a/mempool/cat/pool_test.go b/mempool/cat/pool_test.go index 2b5039bace..a09c267bb4 100644 --- a/mempool/cat/pool_test.go +++ b/mempool/cat/pool_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/code" @@ -727,7 +728,7 @@ func TestTxPool_BroadcastQueue(t *testing.T) { for i := 0; i < txs; i++ { select { case <-ctx.Done(): - t.Fatalf("failed to receive all txs (got %d/%d)", i+1, txs) + assert.FailNowf(t, "failed to receive all txs (got %d/%d)", "", i+1, txs) case wtx := <-txmp.next(): require.Equal(t, wtx.tx, newDefaultTx(fmt.Sprintf("%d", i))) } diff --git a/mempool/cat/reactor.go b/mempool/cat/reactor.go index 519d6aacf2..fb0adbf4b6 100644 --- a/mempool/cat/reactor.go +++ b/mempool/cat/reactor.go @@ -282,7 +282,7 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { if has && !memR.opts.ListenOnly { peerID := memR.ids.GetIDForPeer(e.Src.ID()) memR.Logger.Debug("sending a tx in response to a want msg", "peer", peerID) - if 
p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ + if p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ //nolint:staticcheck ChannelID: mempool.MempoolChannel, Message: &protomem.Txs{Txs: [][]byte{tx}}, }, memR.Logger) { @@ -320,7 +320,7 @@ func (memR *Reactor) broadcastSeenTx(txKey types.TxKey) { // Add jitter to when the node broadcasts it's seen txs to stagger when nodes // in the network broadcast their seenTx messages. - time.Sleep(time.Duration(rand.Intn(10)*10) * time.Millisecond) + time.Sleep(time.Duration(rand.Intn(10)*10) * time.Millisecond) //nolint:gosec for id, peer := range memR.ids.GetAll() { if p, ok := peer.Get(types.PeerStateKey).(PeerState); ok { @@ -338,7 +338,7 @@ func (memR *Reactor) broadcastSeenTx(txKey types.TxKey) { continue } - peer.Send(MempoolStateChannel, bz) + peer.Send(MempoolStateChannel, bz) //nolint:staticcheck } } @@ -371,7 +371,7 @@ func (memR *Reactor) broadcastNewTx(wtx *wrappedTx) { continue } - if peer.Send(mempool.MempoolChannel, bz) { + if peer.Send(mempool.MempoolChannel, bz) { //nolint:staticcheck memR.mempool.PeerHasTx(id, wtx.key) } } @@ -395,7 +395,7 @@ func (memR *Reactor) requestTx(txKey types.TxKey, peer p2p.Peer) { panic(err) } - success := peer.Send(MempoolStateChannel, bz) + success := peer.Send(MempoolStateChannel, bz) //nolint:staticcheck if success { memR.mempool.metrics.RequestedTxs.Add(1) requested := memR.requests.Add(txKey, memR.ids.GetIDForPeer(peer.ID()), memR.findNewPeerToRequestTx) diff --git a/mempool/cat/store.go b/mempool/cat/store.go index 8187b24cc5..94ac9e0b2a 100644 --- a/mempool/cat/store.go +++ b/mempool/cat/store.go @@ -65,12 +65,12 @@ func (s *store) remove(txKey types.TxKey) bool { func (s *store) reserve(txKey types.TxKey) bool { s.mtx.Lock() defer s.mtx.Unlock() - if _, ok := s.txs[txKey]; ok { - return false // already reserved - } else { + _, has := s.txs[txKey] + if !has { s.txs[txKey] = &wrappedTx{height: -1} + return true } - return true + return false } // release is called when a pending 
transaction failed diff --git a/mempool/cat/store_test.go b/mempool/cat/store_test.go index 3cfc7225a3..4a29106ee7 100644 --- a/mempool/cat/store_test.go +++ b/mempool/cat/store_test.go @@ -91,30 +91,27 @@ func TestStoreConcurrentAccess(t *testing.T) { go func(i int) { defer wg.Done() ticker := time.NewTicker(10 * time.Millisecond) - for { - select { - case <-ticker.C: - tx := types.Tx(fmt.Sprintf("tx%d", i%(numTxs/10))) - key := tx.Key() - wtx := newWrappedTx(tx, key, 1, 1, 1, "") - existingTx := store.get(key) - if existingTx != nil && bytes.Equal(existingTx.tx, tx) { - // tx has already been added - return - } - if store.reserve(key) { - // some fail - if i%3 == 0 { - store.release(key) - return - } - store.set(wtx) - // this should be a noop + for range ticker.C { + tx := types.Tx(fmt.Sprintf("tx%d", i%(numTxs/10))) + key := tx.Key() + wtx := newWrappedTx(tx, key, 1, 1, 1, "") + existingTx := store.get(key) + if existingTx != nil && bytes.Equal(existingTx.tx, tx) { + // tx has already been added + return + } + if store.reserve(key) { + // some fail + if i%3 == 0 { store.release(key) return } - // already reserved so we retry in 10 milliseconds + store.set(wtx) + // this should be a noop + store.release(key) + return } + // already reserved so we retry in 10 milliseconds } }(i) } @@ -147,7 +144,7 @@ func TestStoreGetTxs(t *testing.T) { // get txs below a certain priority txs, bz := store.getTxsBelowPriority(int64(numTxs / 2)) require.Equal(t, numTxs/2, len(txs)) - var actualBz int64 = 0 + var actualBz int64 for _, tx := range txs { actualBz += tx.size() } diff --git a/node/node.go b/node/node.go index 22a4b854c2..2db2ec86f5 100644 --- a/node/node.go +++ b/node/node.go @@ -836,7 +836,7 @@ func NewNode(config *cfg.Config, csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) - // create an optional influxdb client to send arbitary data to a remote + // create an optional influxdb client to send arbitrary data to a remote // influxdb 
server. This is used to collect trace data from many different nodes // in a network. influxdbClient, err := trace.NewClient( diff --git a/pkg/trace/flags.go b/pkg/trace/flags.go index e56ecc3fda..a703b427ad 100644 --- a/pkg/trace/flags.go +++ b/pkg/trace/flags.go @@ -4,5 +4,5 @@ const ( FlagInfluxDBURL = "influxdb-url" FlagInfluxDBToken = "influxdb-token" FlagInfluxDBURLDescription = "URL of the InfluxDB instance to use for arbitrary data collection. If not specified, data will not be collected" - FlagInfluxDBTokenDescription = "Token to use when writing to the InfluxDB instance. Must be specified if 'influxdb-url' is specified" + FlagInfluxDBTokenDescription = "Token to use when writing to the InfluxDB instance. Must be specified if 'influxdb-url' is specified" //nolint:gosec ) diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 9ec9dbefdf..29d6dce133 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -514,11 +514,15 @@ func TestBlockSearch(t *testing.T) { require.NoError(t, err) } require.NoError(t, client.WaitForHeight(c, 5, nil)) - // This cannot test match_events as it calls the client BlockSearch function directly - // It is the RPC request handler that processes the match_event - result, err := c.BlockSearch(context.Background(), "begin_event.foo = 100 AND begin_event.bar = 300", nil, nil, "asc") + result, err := c.BlockSearch(context.Background(), "begin_event.foo = 100", nil, nil, "asc") require.NoError(t, err) blockCount := len(result.Blocks) + // if we generate block events within the test (by uncommenting + // the code in line main_test.go:L23) then we expect len(result.Blocks) + // to be at least 5 + // require.GreaterOrEqual(t, blockCount, 5) + + // otherwise it is 0 require.Equal(t, blockCount, 0) } diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index bfbb9763f1..d2198f53fe 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -8,13 +8,12 @@ import ( "strconv" 
"github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/pkg/consts" - blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null" - cmtmath "github.com/tendermint/tendermint/libs/math" cmtquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/consts" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" + blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null" "github.com/tendermint/tendermint/types" ) @@ -482,7 +481,19 @@ func BlockSearch( skipCount := validateSkipCount(page, perPage) pageSize := cmtmath.MinInt(perPage, totalCount-skipCount) - apiResults := fetchBlocks(results, pageSize, skipCount) + apiResults := make([]*ctypes.ResultBlock, 0, pageSize) + for i := skipCount; i < skipCount+pageSize; i++ { + block := GetEnvironment().BlockStore.LoadBlock(results[i]) + if block != nil { + blockMeta := GetEnvironment().BlockStore.LoadBlockMeta(block.Height) + if blockMeta != nil { + apiResults = append(apiResults, &ctypes.ResultBlock{ + Block: block, + BlockID: blockMeta.BlockID, + }) + } + } + } return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil } @@ -503,25 +514,6 @@ func sortBlocks(results []int64, orderBy string) error { return nil } -// fetchBlocks takes a list of block heights and fetches them. 
-func fetchBlocks(results []int64, pageSize int, skipCount int) []*ctypes.ResultBlock { - env := GetEnvironment() - apiResults := make([]*ctypes.ResultBlock, 0, pageSize) - for i := skipCount; i < skipCount+pageSize; i++ { - block := env.BlockStore.LoadBlock(results[i]) - if block != nil { - blockMeta := env.BlockStore.LoadBlockMeta(block.Height) - if blockMeta != nil { - apiResults = append(apiResults, &ctypes.ResultBlock{ - Block: block, - BlockID: blockMeta.BlockID, - }) - } - } - } - return apiResults -} - // fetchDataRootTuples takes an end exclusive range of heights and fetches its // corresponding data root tuples. func fetchDataRootTuples(start, end uint64) ([]DataRootTuple, error) { diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 659447e498..1f303d3393 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -57,7 +57,7 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState if !ok { // peer does not have a state yet continue } - peerStateJSON, err := peerState.ToJSON() + peerStateJSON, err := peerState.MarshalJSON() if err != nil { return nil, err } diff --git a/rpc/core/doc.go b/rpc/core/doc.go index 3dd6e1bfac..503a113a2e 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -2,7 +2,7 @@ Package core defines the CometBFT RPC endpoints. CometBFT ships with its own JSONRPC library - -https://github.com/comet/comet/tree/v0.34.x/rpc/jsonrpc. +https://github.com/cometbft/cometbft/tree/v0.34.x/rpc/jsonrpc. ## Get the list diff --git a/rpc/core/tx.go b/rpc/core/tx.go index 730875cd47..76e7ec292e 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -20,7 +20,7 @@ import ( // Tx allows you to query the transaction results. `nil` could mean the // transaction is in the mempool, invalidated, or was not sent in the first // place. 
-// More: https://docs.tendermint.com/v0.34/rpc/#/Info/tx +// More: https://docs.cometbft.com/v0.34/rpc/#/Info/tx func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { env := GetEnvironment() // if index is disabled, return error @@ -60,9 +60,7 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error // TxSearch allows you to query for multiple transactions results. It returns a // list of transactions (maximum ?per_page entries) and the total count. -// NOTE: proveTx isn't respected but is left in the function signature to -// conform to the endpoint exposed by Tendermint -// More: https://docs.tendermint.com/v0.34/rpc/#/Info/tx_search +// More: https://docs.cometbft.com/v0.34/rpc/#/Info/tx_search func TxSearch( ctx *rpctypes.Context, query string, diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index bd43776ed6..9c928f1f04 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -214,15 +214,22 @@ func (c *Client) Call( if err != nil { return nil, fmt.Errorf("post failed: %w", err) } - defer httpResponse.Body.Close() responseBytes, err := io.ReadAll(httpResponse.Body) if err != nil { - return nil, fmt.Errorf("failed to read response body: %w", err) + return nil, fmt.Errorf("%s. Failed to read response body: %w", getHTTPRespErrPrefix(httpResponse), err) } - return unmarshalResponseBytes(responseBytes, id, result) + res, err := unmarshalResponseBytes(responseBytes, id, result) + if err != nil { + return nil, fmt.Errorf("%s. %w", getHTTPRespErrPrefix(httpResponse), err) + } + return res, nil +} + +func getHTTPRespErrPrefix(resp *http.Response) string { + return fmt.Sprintf("error in json rpc client, with http response metadata: (Status: %s, Protocol %s)", resp.Status, resp.Proto) } // NewRequestBatch starts a batch of requests for this client. 
diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 60da2a2e52..343b1f918c 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -2,8 +2,8 @@ openapi: 3.0.0 info: title: CometBFT RPC contact: - name: CometBFT RPC - url: https://github.com/cometbft/cometbft/issues/new/choose + name: CometBFT + url: https://cometbft.com/ description: | CometBFT supports the following RPC protocols: @@ -17,12 +17,22 @@ info: `$CMTHOME/config/config.toml` file or by using the `--rpc.X` command-line flags. - Default rpc listen address is `tcp://0.0.0.0:26657`. + The default RPC listen address is `tcp://127.0.0.1:26657`. To set another address, set the `laddr` config parameter to desired value. CORS (Cross-Origin Resource Sharing) can be enabled by setting `cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` config parameters. + If testing using a local RPC node, under the `[rpc]` + section change the `cors_allowed_origins` property, please add the URL of + the site where this OpenAPI document is running, for example: + + `cors_allowed_origins = ["http://localhost:8088"]` + + or if testing from the official documentation site: + + `cors_allowed_origins = ["https://docs.cometbft.com"]` + ## Arguments Arguments which expect strings or byte arrays may be passed as quoted @@ -47,22 +57,21 @@ info: Asynchronous RPC functions like event `subscribe` and `unsubscribe` are only available via websockets. 
- Example using https://github.com/hashrocket/ws: + For example using the [websocat](https://github.com/vi/websocat) tool, you can subscribe for 'NewBlock` events + with the following command: + + echo '{ "jsonrpc": "2.0","method": "subscribe","id": 0,"params": {"query": "tm.event='"'NewBlock'"'"} }' | websocat -n -t ws://127.0.0.1:26657/websocket - ws ws://localhost:26657/websocket - > { "jsonrpc": "2.0", "method": "subscribe", "params": ["tm.event='NewBlock'"], "id": 1 } version: "v0.34" license: name: Apache 2.0 url: https://github.com/cometbft/cometbft/blob/v0.34.x/LICENSE servers: - - url: https://rpc.cosmos.network - description: Cosmos mainnet node to interact with the CometBFT RPC + - url: https://rpc.cosmos.directory/cosmoshub + description: Interact with the CometBFT RPC from a public node in the Cosmos registry - url: http://localhost:26657 - description: Interact with the CometBFT RPC locally on your device + description: Interact with CometBFT RPC node running locally tags: - - name: Websocket - description: Subscribe/unsubscribe are reserved for websocket events. - name: Info description: Informations about the node APIs - name: Tx @@ -93,10 +102,10 @@ paths: drop transactions, which might become valid in the future (https://github.com/tendermint/tendermint/issues/3322) - Please refer to https://docs.cometbft.com/v0.34/core/using-cometbft.html#formatting for formatting/encoding rules. + parameters: - in: query name: tx @@ -141,6 +150,7 @@ paths: Please refer to https://docs.cometbft.com/v0.34/core/using-cometbft.html#formatting for formatting/encoding rules. + parameters: - in: query name: tx @@ -183,6 +193,7 @@ paths: Please refer to https://docs.cometbft.com/v0.34/core/using-cometbft.html#formatting for formatting/encoding rules. + parameters: - in: query name: tx @@ -240,225 +251,6 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" - /subscribe: - get: - summary: Subscribe for events via WebSocket. 
- tags: - - Websocket - operationId: subscribe - description: | - To tell which events you want, you need to provide a query. query is a - string, which has a form: "condition AND condition ..." (no OR at the - moment). condition has a form: "key operation operand". key is a string with - a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). - operation can be "=", "<", "<=", ">", ">=", "CONTAINS" AND "EXISTS". operand - can be a string (escaped with single quotes), number, date or time. - - Examples: - tm.event = 'NewBlock' # new blocks - tm.event = 'CompleteProposal' # node got a complete proposal - tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction - tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block - tx.height = 5 # all txs of the fifth block - - CometBFT provides a few predefined keys: tm.event, tx.hash and tx.height. - Note for transactions, you can define additional keys by providing events with - DeliverTx response. - - import ( - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/libs/pubsub/query" - ) - - abci.ResponseDeliverTx{ - Events: []abci.Event{ - { - Type: "rewards.withdraw", - Attributes: abci.EventAttribute{ - {Key: []byte("address"), Value: []byte("AddrA"), Index: true}, - {Key: []byte("source"), Value: []byte("SrcX"), Index: true}, - {Key: []byte("amount"), Value: []byte("..."), Index: true}, - {Key: []byte("balance"), Value: []byte("..."), Index: true}, - }, - }, - { - Type: "rewards.withdraw", - Attributes: abci.EventAttribute{ - {Key: []byte("address"), Value: []byte("AddrB"), Index: true}, - {Key: []byte("source"), Value: []byte("SrcY"), Index: true}, - {Key: []byte("amount"), Value: []byte("..."), Index: true}, - {Key: []byte("balance"), Value: []byte("..."), Index: true}, - }, - }, - { - Type: "transfer", - Attributes: abci.EventAttribute{ - {Key: []byte("sender"), Value: []byte("AddrC"), Index: true}, - {Key: []byte("recipient"), Value: []byte("AddrD"), Index: true}, - 
{Key: []byte("amount"), Value: []byte("..."), Index: true}, - }, - }, - }, - } - - All events are indexed by a composite key of the form {eventType}.{evenAttrKey}. - In the above examples, the following keys would be indexed: - - rewards.withdraw.address - - rewards.withdraw.source - - rewards.withdraw.amount - - rewards.withdraw.balance - - transfer.sender - - transfer.recipient - - transfer.amount - - Multiple event types with duplicate keys are allowed and are meant to - categorize unique and distinct events. In the above example, all events - indexed under the key `rewards.withdraw.address` will have the following - values stored and queryable: - - - AddrA - - AddrB - - To create a query for txs where address AddrA withdrew rewards: - query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'") - - To create a query for txs where address AddrA withdrew rewards from source Y: - query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'Y'") - - To create a query for txs where AddrA transferred funds: - query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrA'") - - The following queries would return no results: - query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrZ'") - query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'") - query.MustParse("tm.event = 'Tx' AND rewards.withdraw.source = 'W'") - - See list of all possible events here - https://godoc.org/github.com/cometbft/cometbft/types#pkg-constants - - For complete query syntax, check out - https://godoc.org/github.com/cometbft/cometbft/libs/pubsub/query. 
- - ```go - import rpchttp "github.com/cometbft/rpc/client/http" - import "github.com/cometbft/cometbft/types" - - client := rpchttp.New("tcp:0.0.0.0:26657", "/websocket") - err := client.Start() - if err != nil { - handle error - } - defer client.Stop() - ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) - defer cancel() - query := "tm.event = 'Tx' AND tx.height = 3" - txs, err := client.Subscribe(ctx, "test-client", query) - if err != nil { - handle error - } - - go func() { - for e := range txs { - fmt.Println("got ", e.Data.(types.EventDataTx)) - } - }() - ``` - - NOTE: if you're not reading events fast enough, CometBFT might - terminate the subscription. - parameters: - - in: query - name: query - required: true - schema: - type: string - example: tm.event = 'Tx' AND tx.height = 5 - description: | - query is a string, which has a form: "condition AND condition ..." (no OR at the - moment). condition has a form: "key operation operand". key is a string with - a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). - operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a - string (escaped with single quotes), number, date or time. 
- responses: - "200": - description: empty answer - content: - application/json: - schema: - $ref: "#/components/schemas/EmptyResponse" - "500": - description: empty error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorResponse" - /unsubscribe: - get: - summary: Unsubscribe from event on Websocket - tags: - - Websocket - operationId: unsubscribe - description: | - ```go - client := rpchttp.New("tcp:0.0.0.0:26657", "/websocket") - err := client.Start() - if err != nil { - handle error - } - defer client.Stop() - query := "tm.event = 'Tx' AND tx.height = 3" - err = client.Unsubscribe(context.Background(), "test-client", query) - if err != nil { - handle error - } - ``` - parameters: - - in: query - name: query - required: true - schema: - type: string - example: tm.event = 'Tx' AND tx.height = 5 - description: | - query is a string, which has a form: "condition AND condition ..." (no OR at the - moment). condition has a form: "key operation operand". key is a string with - a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed). - operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a - string (escaped with single quotes), number, date or time. 
- responses: - "200": - description: Answer - content: - application/json: - schema: - $ref: "#/components/schemas/EmptyResponse" - "500": - description: Error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorResponse" - /unsubscribe_all: - get: - summary: Unsubscribe from all events via WebSocket - tags: - - Websocket - operationId: unsubscribe_all - description: | - Unsubscribe from all events via WebSocket - responses: - "200": - description: empty answer - content: - application/json: - schema: - $ref: "#/components/schemas/EmptyResponse" - "500": - description: empty error - content: - application/json: - schema: - $ref: "#/components/schemas/ErrorResponse" /health: get: summary: Node heartbeat diff --git a/spec/README.md b/spec/README.md index ebb7f210d5..921c68b7cb 100644 --- a/spec/README.md +++ b/spec/README.md @@ -60,11 +60,11 @@ hash-linked batches of transactions. Such transaction batches are called "blocks Hence, CometBFT defines a "blockchain". Each block in CometBFT has a unique index - its Height. -Height's in the blockchain are monotonic. +Heights in the blockchain are monotonic. Each block is committed by a known set of weighted Validators. Membership and weighting within this validator set may change over time. CometBFT guarantees the safety and liveness of the blockchain -so long as less than 1/3 of the total weight of the Validator set +as long as less than 1/3 of the total weight of the Validator set is malicious or faulty. A commit in CometBFT is a set of signed messages from more than 2/3 of diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md index 26f0834c4d..2dcb852f3c 100644 --- a/spec/core/data_structures.md +++ b/spec/core/data_structures.md @@ -71,7 +71,7 @@ and `ABCIApp` is an ABCI application that can return results and changes to the set (TODO). 
Execute is defined as: ```go -func Execute(s State, app ABCIApp, block Block) State { +func Execute(state State, app ABCIApp, block Block) State { // Fuction ApplyBlock executes block of transactions against the app and returns the new root hash of the app state, // modifications to the validator set and the changes of the consensus parameters. AppHash, ValidatorChanges, ConsensusParamChanges := app.ApplyBlock(block) @@ -82,7 +82,6 @@ func Execute(s State, app ABCIApp, block Block) State { InitialHeight: state.InitialHeight, LastResults: abciResponses.DeliverTxResults, AppHash: AppHash, - InitialHeight: state.InitialHeight, LastValidators: state.Validators, Validators: state.NextValidators, NextValidators: UpdateValidators(state.NextValidators, ValidatorChanges), diff --git a/state/execution_test.go b/state/execution_test.go index f1fa67ab09..144f0c5327 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -3,7 +3,8 @@ package state_test import ( "bytes" "context" - "io/ioutil" + "io" + "net/http" "net/http/httptest" "strconv" @@ -279,7 +280,7 @@ func TestProcessProposalRejectedMetric(t *testing.T) { require.NoError(t, err) defer resp.Body.Close() - buf, _ := ioutil.ReadAll(resp.Body) + buf, _ := io.ReadAll(resp.Body) return string(buf) } metrics := sm.PrometheusMetrics(namespace) @@ -621,7 +622,7 @@ func TestFireEventSignedBlockEvent(t *testing.T) { eventBus := types.NewEventBus() err = eventBus.Start() require.NoError(t, err) - defer eventBus.Stop() + defer eventBus.Stop() //nolint:errcheck ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 4cf9774a87..2b4817974f 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -68,6 +68,7 @@ type generateConfig struct { randSource *rand.Rand outputDir string multiVersion string + prometheus bool } // Generate generates random testnets using the given RNG. 
@@ -109,7 +110,7 @@ func Generate(cfg *generateConfig) ([]e2e.Manifest, error) { } manifests := []e2e.Manifest{} for _, opt := range combinations(testnetCombinations) { - manifest, err := generateTestnet(cfg.randSource, opt, upgradeVersion) + manifest, err := generateTestnet(cfg.randSource, opt, upgradeVersion, cfg.prometheus) if err != nil { return nil, err } @@ -119,7 +120,7 @@ func Generate(cfg *generateConfig) ([]e2e.Manifest, error) { } // generateTestnet generates a single testnet with the given options. -func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion string) (e2e.Manifest, error) { +func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion string, prometheus bool) (e2e.Manifest, error) { manifest := e2e.Manifest{ IPv6: ipv6.Choose(r).(bool), ABCIProtocol: nodeABCIProtocols.Choose(r).(string), @@ -129,6 +130,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st ValidatorUpdates: map[string]map[string]int64{}, Nodes: map[string]*e2e.ManifestNode{}, UpgradeVersion: upgradeVersion, + Prometheus: prometheus, } var numSeeds, numValidators, numFulls, numLightClients int diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 3424094fff..8db1ee33fa 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -31,7 +31,7 @@ type CLI struct { func NewCLI() *CLI { cli := &CLI{} cli.root = &cobra.Command{ - Use: "generator -d dir [-g int] [-m version_weight_csv]", + Use: "generator -d dir [-g int] [-m version_weight_csv] [-p]", Short: "End-to-end testnet generator", SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() @@ -48,7 +48,11 @@ func NewCLI() *CLI { if err != nil { return err } - return cli.generate(dir, groups, multiVersion) + prometheus, err := cmd.Flags().GetBool("prometheus") + if err != nil { + return err + } + return cli.generate(dir, groups, multiVersion, prometheus) }, } @@ -57,12 +61,13 @@ func NewCLI() *CLI { 
cli.root.PersistentFlags().StringP("multi-version", "m", "", "Comma-separated list of versions of CometBFT to test in the generated testnets, "+ "or empty to only use this branch's version") cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups") + cli.root.PersistentFlags().BoolP("prometheus", "p", false, "Enable generation of Prometheus metrics on all manifests") return cli } // generate generates manifests in a directory. -func (cli *CLI) generate(dir string, groups int, multiVersion string) error { +func (cli *CLI) generate(dir string, groups int, multiVersion string, prometheus bool) error { err := os.MkdirAll(dir, 0o755) if err != nil { return err @@ -71,6 +76,7 @@ func (cli *CLI) generate(dir string, groups int, multiVersion string) error { cfg := &generateConfig{ randSource: rand.New(rand.NewSource(randomSeed)), //nolint:gosec multiVersion: multiVersion, + prometheus: prometheus, } manifests, err := Generate(cfg) if err != nil { diff --git a/test/e2e/pkg/infra/docker/docker.go b/test/e2e/pkg/infra/docker/docker.go index c46809e288..78a606ce25 100644 --- a/test/e2e/pkg/infra/docker/docker.go +++ b/test/e2e/pkg/infra/docker/docker.go @@ -83,6 +83,9 @@ services: ports: - 26656 - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 +{{- if .PrometheusProxyPort }} + - {{ .PrometheusProxyPort }}:26660 +{{- end }} - 6060 volumes: - ./{{ .Name }}:/cometbft @@ -107,6 +110,9 @@ services: ports: - 26656 - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 +{{- if .PrometheusProxyPort }} + - {{ .PrometheusProxyPort }}:26660 +{{- end }} - 6060 volumes: - ./{{ .Name }}:/cometbft diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 7ae926b770..b6ea5d41e2 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -71,6 +71,10 @@ type Manifest struct { LoadTxSizeBytes int `toml:"load_tx_size_bytes"` LoadTxBatchSize int `toml:"load_tx_batch_size"` LoadTxConnections int `toml:"load_tx_connections"` + + // Enable or disable Prometheus 
metrics on all nodes. + // Defaults to false (disabled). + Prometheus bool `toml:"prometheus"` } // ManifestNode represents a node in a testnet manifest. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index c782880275..de98494deb 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -15,14 +15,14 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" - tmrand "github.com/tendermint/tendermint/libs/rand" rpchttp "github.com/tendermint/tendermint/rpc/client/http" mcs "github.com/tendermint/tendermint/test/maverick/consensus" ) const ( - randomSeed int64 = 2308084734268 - proxyPortFirst uint32 = 5701 + randomSeed int64 = 2308084734268 + proxyPortFirst uint32 = 5701 + prometheusProxyPortFirst uint32 = 6701 defaultBatchSize = 2 defaultConnections = 1 @@ -69,42 +69,45 @@ type Testnet struct { Nodes []*Node KeyType string Evidence int + LoadTxSizeBytes int MaxInboundConnections int MaxOutboundConnections int - LoadTxSizeBytes int LoadTxBatchSize int LoadTxConnections int ABCIProtocol string UpgradeVersion string + Prometheus bool } // Node represents a CometBFT node in a testnet. 
type Node struct { - Name string - Version string - Testnet *Testnet - Mode Mode - PrivvalKey crypto.PrivKey - NodeKey crypto.PrivKey - IP net.IP - ProxyPort uint32 - StartAt int64 - FastSync string - StateSync bool - Mempool string - Database string - ABCIProtocol Protocol - PrivvalProtocol Protocol - PersistInterval uint64 - SnapshotInterval uint64 - RetainBlocks uint64 - Seeds []*Node - PersistentPeers []*Node - Perturbations []Perturbation - Misbehaviors map[int64]string - SendNoLoad bool - InfluxDBURL string - InfluxDBToken string + Name string + Version string + Testnet *Testnet + Mode Mode + PrivvalKey crypto.PrivKey + NodeKey crypto.PrivKey + IP net.IP + ProxyPort uint32 + StartAt int64 + FastSync string + StateSync bool + Mempool string + Database string + ABCIProtocol Protocol + PrivvalProtocol Protocol + PersistInterval uint64 + SnapshotInterval uint64 + RetainBlocks uint64 + Seeds []*Node + PersistentPeers []*Node + Perturbations []Perturbation + Misbehaviors map[int64]string + SendNoLoad bool + Prometheus bool + PrometheusProxyPort uint32 + InfluxDBURL string + InfluxDBToken string } // LoadTestnet loads a testnet from a manifest file, using the filename to @@ -116,13 +119,14 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test dir := strings.TrimSuffix(fname, filepath.Ext(fname)) keyGen := newKeyGenerator(randomSeed) proxyPortGen := newPortGenerator(proxyPortFirst) + prometheusProxyPortGen := newPortGenerator(prometheusProxyPortFirst) _, ipNet, err := net.ParseCIDR(ifd.Network) if err != nil { return nil, fmt.Errorf("invalid IP network address %q: %w", ifd.Network, err) } testnet := &Testnet{ - Name: filepath.Base(dir) + "-" + tmrand.Str(6), + Name: filepath.Base(dir), File: fname, Dir: dir, IP: ipNet, @@ -131,13 +135,14 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test Validators: map[*Node]int64{}, ValidatorUpdates: map[int64]map[*Node]int64{}, Nodes: []*Node{}, + 
MaxInboundConnections: manifest.MaxInboundConnections, + MaxOutboundConnections: manifest.MaxOutboundConnections, LoadTxSizeBytes: manifest.LoadTxSizeBytes, LoadTxBatchSize: manifest.LoadTxBatchSize, LoadTxConnections: manifest.LoadTxConnections, - MaxInboundConnections: manifest.MaxInboundConnections, - MaxOutboundConnections: manifest.MaxOutboundConnections, ABCIProtocol: manifest.ABCIProtocol, UpgradeVersion: manifest.UpgradeVersion, + Prometheus: manifest.Prometheus, } if len(manifest.KeyType) != 0 { testnet.KeyType = manifest.KeyType @@ -203,6 +208,7 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test SendNoLoad: nodeManifest.SendNoLoad, InfluxDBURL: ifd.InfluxDBURL, InfluxDBToken: ifd.InfluxDBToken, + Prometheus: testnet.Prometheus, } if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this @@ -222,6 +228,9 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test if nodeManifest.PersistInterval != nil { node.PersistInterval = *nodeManifest.PersistInterval } + if node.Prometheus { + node.PrometheusProxyPort = prometheusProxyPortGen.Next() + } for _, p := range nodeManifest.Perturb { node.Perturbations = append(node.Perturbations, Perturbation(p)) } @@ -313,15 +322,15 @@ func (t Testnet) Validate() error { if t.IP == nil { return errors.New("network has no IP") } - if len(t.Nodes) == 0 { - return errors.New("network has no nodes") - } if t.MaxInboundConnections < 0 { return errors.New("MaxInboundConnections must not be negative") } if t.MaxOutboundConnections < 0 { return errors.New("MaxOutboundConnections must not be negative") } + if len(t.Nodes) == 0 { + return errors.New("network has no nodes") + } for _, node := range t.Nodes { if err := node.Validate(t); err != nil { return fmt.Errorf("invalid node %q: %w", node.Name, err) @@ -341,13 +350,22 @@ func (n Node) Validate(testnet Testnet) error { if !testnet.IP.Contains(n.IP) { return 
fmt.Errorf("node IP %v is not in testnet network %v", n.IP, testnet.IP) } - if n.ProxyPort > 0 { - if n.ProxyPort <= 1024 { - return fmt.Errorf("local port %v must be >1024", n.ProxyPort) + if n.ProxyPort == n.PrometheusProxyPort { + return fmt.Errorf("node local port %v used also for Prometheus local port", n.ProxyPort) + } + if n.ProxyPort > 0 && n.ProxyPort <= 1024 { + return fmt.Errorf("local port %v must be >1024", n.ProxyPort) + } + if n.PrometheusProxyPort > 0 && n.PrometheusProxyPort <= 1024 { + return fmt.Errorf("local port %v must be >1024", n.PrometheusProxyPort) + } + for _, peer := range testnet.Nodes { + if peer.Name != n.Name && peer.ProxyPort == n.ProxyPort { + return fmt.Errorf("peer %q also has local port %v", peer.Name, n.ProxyPort) } - for _, peer := range testnet.Nodes { - if peer.Name != n.Name && peer.ProxyPort == n.ProxyPort { - return fmt.Errorf("peer %q also has local port %v", peer.Name, n.ProxyPort) + if n.PrometheusProxyPort > 0 { + if peer.Name != n.Name && peer.PrometheusProxyPort == n.PrometheusProxyPort { + return fmt.Errorf("peer %q also has local port %v", peer.Name, n.PrometheusProxyPort) } } } diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 5d14135afe..6e2f922577 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -277,7 +277,7 @@ func NewCLI() *CLI { Min Block Interval Max Block Interval over a 100 block sampling period. - + Does not run any perturbations. 
`, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index c2a702f227..5a49d3b1d2 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -259,6 +259,10 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { if node.Testnet.MaxOutboundConnections != 0 { cfg.P2P.MaxNumOutboundPeers = node.Testnet.MaxOutboundConnections } + if node.Prometheus { + cfg.Instrumentation.Prometheus = true + } + return cfg, nil } diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index bcb8a6a7b8..c08afec2f5 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -52,6 +52,9 @@ func Start(testnet *e2e.Testnet) error { } nid := p2p.NodeKey{PrivKey: node.NodeKey} logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://127.0.0.1:%v chain-id %s node-id %s", node.Name, node.ProxyPort, testnet.Name, nid.ID())) + if node.PrometheusProxyPort > 0 { + logger.Info("start", "msg", log.NewLazySprintf("with Prometheus on http://127.0.0.1:%v/metrics", node.PrometheusProxyPort)) + } } networkHeight := testnet.InitialHeight diff --git a/types/block_test.go b/types/block_test.go index 72afdd9d86..bd6c05f374 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -46,7 +46,7 @@ func TestBlockAddEvidence(t *testing.T) { ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} - block := MakeBlock(h, makeData(txs, nil), commit, evList) + block := MakeBlock(h, makeData(txs), commit, evList) require.NotNil(t, block) require.Equal(t, 1, len(block.Evidence.Evidence)) require.NotNil(t, block.EvidenceHash) @@ -90,7 +90,7 @@ func TestBlockValidateBasic(t *testing.T) { tc := tc i := i t.Run(tc.testName, func(t *testing.T) { - block := MakeBlock(h, makeData(txs, nil), commit, evList) + block := MakeBlock(h, makeData(txs), commit, evList) block.ProposerAddress = valSet.GetProposer().Address 
tc.malleateBlock(block) err = block.ValidateBasic() @@ -101,13 +101,13 @@ func TestBlockValidateBasic(t *testing.T) { func TestBlockHash(t *testing.T) { assert.Nil(t, (*Block)(nil).Hash()) - assert.Nil(t, MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}, nil), nil, nil).Hash()) + assert.Nil(t, MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}), nil, nil).Hash()) } func TestBlockMakePartSet(t *testing.T) { assert.Nil(t, (*Block)(nil).MakePartSet(2)) - partSet := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}, nil), nil, nil).MakePartSet(1024) + partSet := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}), nil, nil).MakePartSet(1024) assert.NotNil(t, partSet) assert.EqualValues(t, 1, partSet.Total()) } @@ -125,7 +125,7 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} - partSet := MakeBlock(h, makeData([]Tx{Tx("Hello World")}, nil), commit, evList).MakePartSet(512) + partSet := MakeBlock(h, makeData([]Tx{Tx("Hello World")}), commit, evList).MakePartSet(512) assert.NotNil(t, partSet) assert.EqualValues(t, 4, partSet.Total()) } @@ -142,7 +142,7 @@ func TestBlockHashesTo(t *testing.T) { ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} - block := MakeBlock(h, makeData([]Tx{Tx("Hello World")}, nil), commit, evList) + block := MakeBlock(h, makeData([]Tx{Tx("Hello World")}), commit, evList) block.ValidatorsHash = valSet.Hash() assert.False(t, block.HashesTo([]byte{})) assert.False(t, block.HashesTo([]byte("something else"))) @@ -150,7 +150,7 @@ func TestBlockHashesTo(t *testing.T) { } func TestBlockSize(t *testing.T) { - size := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}, nil), nil, nil).Size() + size := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}), nil, nil).Size() if size <= 0 { t.Fatal("Size of the block is zero or negative") } @@ -161,7 +161,7 @@ 
func TestBlockString(t *testing.T) { assert.Equal(t, "nil-Block", (*Block)(nil).StringIndented("")) assert.Equal(t, "nil-Block", (*Block)(nil).StringShort()) - block := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}, nil), nil, nil) + block := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}), nil, nil) assert.NotEqual(t, "nil-Block", block.String()) assert.NotEqual(t, "nil-Block", block.StringIndented("")) assert.NotEqual(t, "nil-Block", block.StringShort()) @@ -196,7 +196,7 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID { var nilBytes []byte // This follows RFC-6962, i.e. `echo -n ” | sha256sum` -var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, +var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, //nolint:unused 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55} @@ -619,16 +619,16 @@ func TestBlockIDValidateBasic(t *testing.T) { func TestBlockProtoBuf(t *testing.T) { h := cmtrand.Int63() c1 := randCommit(time.Now()) - b1 := MakeBlock(h, makeData([]Tx{Tx([]byte{1})}, nil), &Commit{Signatures: []CommitSig{}}, []Evidence{}) + b1 := MakeBlock(h, makeData([]Tx{Tx([]byte{1})}), &Commit{Signatures: []CommitSig{}}, []Evidence{}) b1.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) evi := NewMockDuplicateVoteEvidence(h, evidenceTime, "block-test-chain") - b2 := MakeBlock(h, makeData([]Tx{Tx([]byte{1})}, nil), c1, []Evidence{evi}) + b2 := MakeBlock(h, makeData([]Tx{Tx([]byte{1})}), c1, []Evidence{evi}) b2.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) b2.Evidence.ByteSize() - b3 := MakeBlock(h, makeData([]Tx{}, nil), c1, []Evidence{}) + b3 := MakeBlock(h, makeData([]Tx{}), c1, []Evidence{}) b3.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) testCases := []struct { msg string @@ -891,9 +891,9 @@ func 
TestBlobsByNamespaceIsSorted(t *testing.T) { } type testCase struct { - descripton string - blobs []Blob - want bool + description string + blobs []Blob + want bool } tests := []testCase{ @@ -903,7 +903,7 @@ func TestBlobsByNamespaceIsSorted(t *testing.T) { } for _, tc := range tests { - t.Run(tc.descripton, func(t *testing.T) { + t.Run(tc.description, func(t *testing.T) { bs := tc.blobs assert.Equal(t, tc.want, sort.IsSorted(BlobsByNamespace(bs))) }) diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 1f3f02c42e..5738509aa7 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -126,7 +126,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } }) - block := MakeBlock(0, makeData([]Tx{}, nil), nil, []Evidence{}) + block := MakeBlock(0, makeData([]Tx{}), nil, []Evidence{}) // blockID := BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(BlockPartSizeBytes).Header()} resultBeginBlock := abci.ResponseBeginBlock{ Events: []abci.Event{ @@ -279,7 +279,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } }) - block := MakeBlock(0, makeData([]Tx{}, nil), nil, []Evidence{}) + block := MakeBlock(0, makeData([]Tx{}), nil, []Evidence{}) resultBeginBlock := abci.ResponseBeginBlock{ Events: []abci.Event{ {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("baz"), Value: []byte("1")}}}, diff --git a/types/light.go b/types/light.go index 60605353a8..5a2a2901e3 100644 --- a/types/light.go +++ b/types/light.go @@ -120,6 +120,11 @@ type SignedHeader struct { Commit *Commit `json:"commit"` } +// IsEmpty returns true if both the header and commit are nil. +func (sh SignedHeader) IsEmpty() bool { + return sh.Header == nil && sh.Commit == nil +} + // ValidateBasic does basic consistency checks and makes sure the header // and commit are consistent. 
// diff --git a/types/test_util.go b/types/test_util.go index d4b88d14f6..8c9bcc7533 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" ) func MakeCommit(blockID BlockID, height int64, round int32, @@ -21,7 +21,7 @@ func MakeCommit(blockID BlockID, height int64, round int32, ValidatorIndex: int32(i), Height: height, Round: round, - Type: tmproto.PrecommitType, + Type: cmtproto.PrecommitType, BlockID: blockID, Timestamp: now, } @@ -65,7 +65,7 @@ func MakeVote( Height: height, Round: 0, Timestamp: now, - Type: tmproto.PrecommitType, + Type: cmtproto.PrecommitType, BlockID: blockID, } v := vote.ToProto() @@ -79,10 +79,7 @@ func MakeVote( return vote, nil } -func makeData(txs []Tx, blobs []Blob) Data { - if blobs == nil { - blobs = []Blob{} - } +func makeData(txs []Tx) Data { return Data{ Txs: txs, } diff --git a/types/tx_test.go b/types/tx_test.go index 1b0e386d91..758f00eb49 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -7,15 +7,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - tmrand "github.com/tendermint/tendermint/libs/rand" + cmtrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/pkg/consts" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" ) func makeTxs(cnt, size int) Txs { txs := make(Txs, cnt) for i := 0; i < cnt; i++ { - txs[i] = tmrand.Bytes(size) + txs[i] = cmtrand.Bytes(size) } return txs } @@ -86,7 +86,7 @@ func TestUnmarshalIndexWrapper(t *testing.T) { func TestUnmarshalBlobTx(t *testing.T) { tx := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9} namespaceOne := bytes.Repeat([]byte{1}, consts.NamespaceIDSize) - blob := tmproto.Blob{ + blob := cmtproto.Blob{ NamespaceId: namespaceOne, Data: 
[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}, ShareVersion: 0, diff --git a/types/vote.go b/types/vote.go index b74fd6c34c..544ae47b58 100644 --- a/types/vote.go +++ b/types/vote.go @@ -161,8 +161,8 @@ func (vote *Vote) ValidateBasic() error { return errors.New("invalid Type") } - if vote.Height < 0 { - return errors.New("negative Height") + if vote.Height <= 0 { + return errors.New("negative or zero Height") } if vote.Round < 0 { diff --git a/types/vote_test.go b/types/vote_test.go index ea36f2be4a..6d3a8c720d 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -241,6 +241,7 @@ func TestVoteValidateBasic(t *testing.T) { }{ {"Good Vote", func(v *Vote) {}, false}, {"Negative Height", func(v *Vote) { v.Height = -1 }, true}, + {"Zero Height", func(v *Vote) { v.Height = 0 }, true}, {"Negative Round", func(v *Vote) { v.Round = -1 }, true}, {"Invalid BlockID", func(v *Vote) { v.BlockID = BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}} diff --git a/version/version.go b/version/version.go index 46d9868ea1..36c9901711 100644 --- a/version/version.go +++ b/version/version.go @@ -3,7 +3,7 @@ package version const ( // TMCoreSemVer is the used as the fallback version of CometBFT Core // when not using git describe. It is formatted with semantic versioning. - TMCoreSemVer = "0.34.27" + TMCoreSemVer = "0.34.28" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.17.0"