diff --git a/Makefile b/Makefile
index 6d46f90be..2332adca1 100644
--- a/Makefile
+++ b/Makefile
@@ -81,39 +81,10 @@ endif
.PHONY: install_ci_deps
install_ci_deps: ## Installs `mockgen` and other go tools
go install "github.com/golang/mock/mockgen@v1.6.0" && mockgen --version
- go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.60.3 && golangci-lint --version
+ go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1 && golangci-lint --version
go install golang.org/x/tools/cmd/goimports@latest
go install github.com/mikefarah/yq/v4@latest
-.PHONY: install_cosmovisor
-install_cosmovisor: ## Installs `cosmovisor`
- go install cosmossdk.io/tools/cosmovisor/cmd/cosmovisor@v1.6.0 && cosmovisor version --cosmovisor-only
-
-.PHONY: cosmovisor_cross_compile
-cosmovisor_cross_compile: # Installs multiple cosmovisor binaries for different platforms (used by Dockerfile.release)
- @COSMOVISOR_VERSION="v1.6.0"; \
- PLATFORMS="linux/amd64 linux/arm64"; \
- mkdir -p ./tmp; \
- echo "Fetching Cosmovisor source..."; \
- temp_dir=$$(mktemp -d); \
- cd $$temp_dir; \
- go mod init temp; \
- go get cosmossdk.io/tools/cosmovisor/cmd/cosmovisor@$$COSMOVISOR_VERSION; \
- for platform in $$PLATFORMS; do \
- OS=$${platform%/*}; \
- ARCH=$${platform#*/}; \
- echo "Compiling for $$OS/$$ARCH..."; \
- GOOS=$$OS GOARCH=$$ARCH go build -o $(CURDIR)/tmp/cosmovisor-$$OS-$$ARCH cosmossdk.io/tools/cosmovisor/cmd/cosmovisor; \
- done; \
- cd $(CURDIR); \
- rm -rf $$temp_dir; \
- echo "Compilation complete. Binaries are in ./tmp/"; \
- ls -l ./tmp/cosmovisor-*
-
-.PHONY: cosmovisor_clean
-cosmovisor_clean:
- rm -f ./tmp/cosmovisor-*
-
########################
### Makefile Helpers ###
########################
@@ -132,6 +103,167 @@ list: ## List all make targets
help: ## Prints all the targets in all the Makefiles
@grep -h -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-60s\033[0m %s\n", $$1, $$2}'
+##############
+### Checks ###
+##############
+
+# TODO_DOCUMENT: All of the `check_` helpers can be installed differently depending
+# on the user's OS and environment.
+# NB: For mac users, you may need to install with the proper linkers: https://github.com/golang/go/issues/65940
+
+.PHONY: check_go_version
+# Internal helper target - check go version
+check_go_version:
+ @# Extract the version number from the `go version` command.
+ @GO_VERSION=$$(go version | cut -d " " -f 3 | cut -c 3-) && \
+ MAJOR_VERSION=$$(echo $$GO_VERSION | cut -d "." -f 1) && \
+ MINOR_VERSION=$$(echo $$GO_VERSION | cut -d "." -f 2) && \
+ \
+ if [ "$$MAJOR_VERSION" -ne 1 ] || [ "$$MINOR_VERSION" -le 20 ] ; then \
+ echo "Invalid Go version. Expected 1.21.x or newer but found $$GO_VERSION"; \
+ exit 1; \
+ fi
+
+.PHONY: check_ignite_version
+# Internal helper target - check ignite version
+check_ignite_version:
+ @version=$$(ignite version 2>/dev/null | grep 'Ignite CLI version:' | awk '{print $$4}') ; \
+ if [ "$$(printf "v28\n$$version" | sort -V | head -n1)" != "v28" ]; then \
+ echo "Error: Version $$version is less than v28. Exiting with error." ; \
+ exit 1 ; \
+ fi
+
+.PHONY: check_mockgen
+# Internal helper target - check if mockgen is installed
+check_mockgen:
+ { \
+ if ( ! ( command -v mockgen >/dev/null )); then \
+	echo "Seems like you don't have 'mockgen' installed. Please visit https://github.com/golang/mock#installation and follow the instructions to install 'mockgen' before continuing"; \
+ exit 1; \
+ fi; \
+ }
+
+
+.PHONY: check_act
+# Internal helper target - check if `act` is installed
+check_act:
+ { \
+ if ( ! ( command -v act >/dev/null )); then \
+	echo "Seems like you don't have 'act' installed. Please visit https://github.com/nektos/act before continuing"; \
+ exit 1; \
+ fi; \
+ }
+
+.PHONY: check_gh
+# Internal helper target - check if `gh` is installed
+check_gh:
+ { \
+ if ( ! ( command -v gh >/dev/null )); then \
+	echo "Seems like you don't have 'gh' installed. Please visit https://cli.github.com/ before continuing"; \
+ exit 1; \
+ fi; \
+ }
+
+.PHONY: check_docker
+# Internal helper target - check if docker is installed
+check_docker:
+ { \
+ if ( ! ( command -v docker >/dev/null && (docker compose version >/dev/null || command -v docker-compose >/dev/null) )); then \
+ echo "Seems like you don't have Docker or docker-compose installed. Make sure you review build/localnet/README.md and docs/development/README.md before continuing"; \
+ exit 1; \
+ fi; \
+ }
+.PHONY: check_kind
+# Internal helper target - check if kind is installed
+check_kind:
+ @if ! command -v kind >/dev/null 2>&1; then \
+ echo "kind is not installed. Make sure you review build/localnet/README.md and docs/development/README.md before continuing"; \
+ exit 1; \
+ fi
+
+.PHONY: check_docker_ps
+## Internal helper target - checks if Docker is running
+check_docker_ps: check_docker
+ @echo "Checking if Docker is running..."
+ @docker ps > /dev/null 2>&1 || (echo "Docker is not running. Please start Docker and try again."; exit 1)
+
+.PHONY: check_kind_context
+## Internal helper target - checks if the kind-kind context exists and is set
+check_kind_context: check_kind
+ @if ! kubectl config get-contexts | grep -q 'kind-kind'; then \
+ echo "kind-kind context does not exist. Please create it or switch to it."; \
+ exit 1; \
+ fi
+ @if ! kubectl config current-context | grep -q 'kind-kind'; then \
+ echo "kind-kind context is not currently set. Use 'kubectl config use-context kind-kind' to set it."; \
+ exit 1; \
+ fi
+
+
+.PHONY: check_godoc
+# Internal helper target - check if godoc is installed
+check_godoc:
+ { \
+ if ( ! ( command -v godoc >/dev/null )); then \
+ echo "Seems like you don't have godoc installed. Make sure you install it via 'go install golang.org/x/tools/cmd/godoc@latest' before continuing"; \
+ exit 1; \
+ fi; \
+ }
+
+.PHONY: check_npm
+# Internal helper target - check if npm is installed
+check_npm:
+ { \
+ if ( ! ( command -v npm >/dev/null )); then \
+ echo "Seems like you don't have npm installed. Make sure you install it before continuing"; \
+ exit 1; \
+ fi; \
+ }
+
+.PHONY: check_jq
+# Internal helper target - check if jq is installed
+check_jq:
+ { \
+ if ( ! ( command -v jq >/dev/null )); then \
+ echo "Seems like you don't have jq installed. Make sure you install it before continuing"; \
+ exit 1; \
+ fi; \
+ }
+
+.PHONY: check_yq
+# Internal helper target - check if `yq` is installed
+check_yq:
+ { \
+ if ( ! ( command -v yq >/dev/null )); then \
+	echo "Seems like you don't have 'yq' installed. Make sure you install it before continuing"; \
+ exit 1; \
+ fi; \
+ }
+
+.PHONY: check_node
+# Internal helper target - check if node is installed
+check_node:
+ { \
+ if ( ! ( command -v node >/dev/null )); then \
+ echo "Seems like you don't have node installed. Make sure you install it before continuing"; \
+ exit 1; \
+ fi; \
+ }
+
+.PHONY: check_proto_unstable_marshalers
+check_proto_unstable_marshalers: ## Check that all protobuf files have the `stable_marshaler_all` option set to true.
+ go run ./tools/scripts/protocheck/cmd unstable
+
+.PHONY: fix_proto_unstable_marshalers
+fix_proto_unstable_marshalers: ## Ensure the `stable_marshaler_all` option is present on all protobuf files.
+ go run ./tools/scripts/protocheck/cmd unstable --fix
+ ${MAKE} proto_regen
+
+
+.PHONY: warn_destructive
+warn_destructive: ## Print WARNING to the user
+ @echo "This is a destructive action that will affect docker resources outside the scope of this repo!"
+
#######################
### Proto Helpers ####
#######################
@@ -182,6 +314,75 @@ docker_wipe: check_docker warn_destructive prompt_user ## [WARNING] Remove all t
docker images -q | xargs -r -I {} docker rmi {}
docker volume ls -q | xargs -r -I {} docker volume rm {}
+########################
+### Localnet Helpers ###
+########################
+
+.PHONY: localnet_up
+localnet_up: check_docker_ps check_kind_context proto_regen localnet_regenesis ## Starts up a clean localnet
+ tilt up
+
+.PHONY: localnet_up_quick
+localnet_up_quick: check_docker_ps check_kind_context ## Starts up a localnet without regenerating fixtures
+ tilt up
+
+.PHONY: localnet_down
+localnet_down: ## Delete resources created by localnet
+ tilt down
+
+.PHONY: localnet_regenesis
+localnet_regenesis: check_yq warn_message_acc_initialize_pubkeys ## Regenerate the localnet genesis file
+# NOTE: intentionally not using --home flag to avoid overwriting the test keyring
+ @echo "Initializing chain..."
+ @set -e
+ @ignite chain init --skip-proto
+ AUTH_CONTENT=$$(cat ./tools/scripts/authz/dao_genesis_authorizations.json | jq -r tostring); \
+ $(SED) -i -E 's!^(\s*)"authorization": (\[\]|null)!\1"authorization": '$$AUTH_CONTENT'!' ${HOME}/.poktroll/config/genesis.json;
+
+ @cp -r ${HOME}/.poktroll/keyring-test $(POKTROLLD_HOME)
+ @cp -r ${HOME}/.poktroll/config $(POKTROLLD_HOME)/
+
+.PHONY: localnet_relayminer1_ping
+localnet_relayminer1_ping:
+ @echo "Pinging relayminer 1..."
+ @curl -X GET localhost:7001 || (echo "Failed to ping relayminer1. Make sure your localnet environment or your relayminer pod is up and running"; exit 1)
+ @echo "OK"
+
+.PHONY: localnet_relayminer2_ping
+localnet_relayminer2_ping:
+ @echo "Pinging relayminer 2..."
+ @curl -X GET localhost:7002 || (echo "Failed to ping relayminer2. Make sure your localnet environment or your relayminer pod is up and running"; exit 1)
+ @echo "OK"
+
+.PHONY: localnet_relayminer3_ping
+localnet_relayminer3_ping:
+ @echo "Pinging relayminer 3..."
+ @curl -X GET localhost:7003 || (echo "Failed to ping relayminer3. Make sure your localnet environment or your relayminer pod is up and running"; exit 1)
+ @echo "OK"
+
+.PHONY: send_relay_sovereign_app_JSONRPC
+send_relay_sovereign_app_JSONRPC: # Send a JSONRPC relay through the AppGateServer as a sovereign application
+ curl -X POST -H "Content-Type: application/json" \
+ --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
+ $(APPGATE_SERVER)/anvil
+
+.PHONY: send_relay_delegating_app_JSONRPC
+send_relay_delegating_app_JSONRPC: # Send a relay through the gateway as an application that's delegating to this gateway
+ @appAddr=$$(poktrolld keys show app1 -a) && \
+ curl -X POST -H "Content-Type: application/json" \
+ --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
+ $(GATEWAY_URL)/anvil?applicationAddr=$$appAddr
+
+.PHONY: send_relay_sovereign_app_REST
+send_relay_sovereign_app_REST: # Send a REST relay through the AppGateServer as a sovereign application
+ curl -X POST -H "Content-Type: application/json" \
+ --data '{"model": "qwen:0.5b", "stream": false, "messages": [{"role": "user", "content":"count from 1 to 10"}]}' \
+ $(APPGATE_SERVER)/ollama/api/chat
+
+.PHONY: cosmovisor_start_node
+cosmovisor_start_node: # Starts the node using cosmovisor that waits for an upgrade plan
+ bash tools/scripts/upgrades/cosmovisor-start-node.sh
+
###############
### Linting ###
###############
@@ -193,6 +394,96 @@ go_lint: ## Run all go linters
go_imports: check_go_version ## Run goimports on all go files
go run ./tools/scripts/goimports
+#############
+### Tests ###
+#############
+
+.PHONY: test_e2e_env
+test_e2e_env: warn_message_acc_initialize_pubkeys ## Setup the default env vars for E2E tests
+ export POCKET_NODE=$(POCKET_NODE) && \
+ export APPGATE_SERVER=$(APPGATE_SERVER) && \
+ export POKTROLLD_HOME=../../$(POKTROLLD_HOME)
+
+.PHONY: test_e2e
+test_e2e: test_e2e_env ## Run all E2E tests
+ go test -count=1 -v ./e2e/tests/... -tags=e2e,test
+
+.PHONY: test_e2e_relay
+test_e2e_relay: test_e2e_env ## Run only the E2E suite that exercises the relay life-cycle
+ go test -v ./e2e/tests/... -tags=e2e,test --features-path=relay.feature
+
+.PHONY: test_e2e_app
+test_e2e_app: test_e2e_env ## Run only the E2E suite that exercises the application life-cycle
+ go test -v ./e2e/tests/... -tags=e2e,test --features-path=stake_app.feature
+
+.PHONY: test_e2e_supplier
+test_e2e_supplier: test_e2e_env ## Run only the E2E suite that exercises the supplier life-cycle
+ go test -v ./e2e/tests/... -tags=e2e,test --features-path=stake_supplier.feature
+
+.PHONY: test_e2e_gateway
+test_e2e_gateway: test_e2e_env ## Run only the E2E suite that exercises the gateway life-cycle
+ go test -v ./e2e/tests/... -tags=e2e,test --features-path=stake_gateway.feature
+
+.PHONY: test_e2e_session
+test_e2e_session: test_e2e_env ## Run only the E2E suite that exercises the session (i.e. claim/proof) life-cycle
+ go test -v ./e2e/tests/... -tags=e2e,test --features-path=session.feature
+
+.PHONY: test_e2e_tokenomics
+test_e2e_tokenomics: test_e2e_env ## Run only the E2E suite that exercises the session & tokenomics settlement
+ go test -v ./e2e/tests/... -tags=e2e,test --features-path=0_settlement.feature
+
+.PHONY: test_e2e_params
+test_e2e_params: test_e2e_env ## Run only the E2E suite that exercises parameter updates for all modules
+ go test -v ./e2e/tests/... -tags=e2e,test --features-path=update_params.feature
+
+.PHONY: test_load_relays_stress_custom
+test_load_relays_stress_custom: ## Run the stress test for E2E relays using a custom manifest. The "loadtest_manifest_example.yaml" manifest is used by default. Set the `LOAD_TEST_CUSTOM_MANIFEST` environment variable to use a different manifest.
+ go test -v -count=1 ./load-testing/tests/... \
+ -tags=load,test -run LoadRelays --log-level=debug --timeout=30m \
+ --manifest ./load-testing/$(LOAD_TEST_CUSTOM_MANIFEST)
+
+.PHONY: test_load_relays_stress_localnet
+test_load_relays_stress_localnet: test_e2e_env warn_message_local_stress_test ## Run the stress test for E2E relays on LocalNet.
+ go test -v -count=1 ./load-testing/tests/... \
+ -tags=load,test -run LoadRelays --log-level=debug --timeout=30m \
+ --manifest ./load-testing/loadtest_manifest_localnet.yaml
+
+.PHONY: test_load_relays_stress_localnet_single_supplier
+test_load_relays_stress_localnet_single_supplier: test_e2e_env warn_message_local_stress_test ## Run the stress test for E2E relays on LocalNet using exclusively one supplier.
+ go test -v -count=1 ./load-testing/tests/... \
+ -tags=load,test -run TestLoadRelaysSingleSupplier --log-level=debug --timeout=30m \
+ --manifest ./load-testing/loadtest_manifest_localnet_single_supplier.yaml
+
+.PHONY: test_verbose
+test_verbose: check_go_version ## Run all go tests verbosely
+ go test -count=1 -v -race -tags test ./...
+
+# NB: buildmode=pie is necessary to avoid linker errors on macOS.
+# It is not compatible with `-race`, which is why it's omitted here.
+# See ref for more details: https://github.com/golang/go/issues/54482#issuecomment-1251124908
+.PHONY: test_all
+test_all: warn_flaky_tests check_go_version ## Run all go tests showing detailed output only on failures
+ go test -count=1 -buildmode=pie -tags test ./...
+
+.PHONY: test_all_with_integration
+test_all_with_integration: check_go_version ## Run all go tests, including those with the integration
+ go test -count=1 -v -race -tags test,integration ./...
+
+# We are explicitly using an env variable rather than a build tag to keep flaky
+# tests in line with non-flaky tests, and to make it easy to turn them
+# on and off without maintaining extra files.
+.PHONY: test_all_with_integration_and_flaky
+test_all_with_integration_and_flaky: check_go_version ## Run all go tests, including those with the integration and flaky tests
+ INCLUDE_FLAKY_TESTS=true go test -count=1 -v -race -tags test,integration ./...
+
+.PHONY: test_integration
+test_integration: check_go_version ## Run only the in-memory integration "unit" tests
+ go test -count=1 -v -race -tags test,integration ./tests/integration/...
+
+.PHONY: itest
+itest: check_go_version ## Run tests iteratively (see usage for more)
+ ./tools/scripts/itest.sh $(filter-out $@,$(MAKECMDGOALS))
+
.PHONY: go_mockgen
go_mockgen: ## Use `mockgen` to generate mocks used for testing purposes of all the modules.
find . -name "*_mock.go" | xargs --no-run-if-empty rm
@@ -219,6 +510,278 @@ go_develop: check_ignite_version proto_regen go_mockgen ## Generate protos and m
.PHONY: go_develop_and_test
go_develop_and_test: go_develop test_all ## Generate protos, mocks and run all tests
+#############
+### TODOS ###
+#############
+
+# How do I use TODOs?
+# 1. <KEYWORD>: <DESCRIPTION>;
+# e.g. TODO_HACK: This is a hack, we need to fix it later
+# 2. If there's a specific issue or specific person, add that in parentheses
+# e.g. TODO(@Olshansk): Automatically link to the Github user https://github.com/olshansk
+# e.g. TODO_INVESTIGATE(#420): Automatically link this to github issue https://github.com/pokt-network/poktroll/issues/420
+# e.g. TODO_DISCUSS(@Olshansk, #420): Specific individual should tend to the action item in the specific ticket
+# e.g. TODO_CLEANUP(core): This is not tied to an issue, or a person, but should only be done by the core team.
+# e.g. TODO_CLEANUP: This is not tied to an issue, or a person, and can be done by the core team or external contributors.
+# 3. Feel free to add additional keywords to the list above.
+
+# Inspired by @goldinguy_ in this post: https://goldin.io/blog/stop-using-todo ###
+# TODO - General Purpose catch-all.
+# TODO_COMMUNITY - A TODO that may be a candidate for outsourcing to the community.
+# TODO_DECIDE - A TODO indicating we need to make a decision and document it using an ADR in the future; https://github.com/pokt-network/pocket-network-protocol/tree/main/ADRs
+# TODO_TECHDEBT - Not a great implementation, but we need to fix it later.
+# TODO_BLOCKER - BEFORE MAINNET. Similar to TECHDEBT, but of higher priority, urgency & risk prior to the next release
+# TODO_QOL - AFTER MAINNET. Similar to TECHDEBT, but of lower priority. Doesn't deserve a GitHub Issue but will improve everyone's life.
+# TODO_IMPROVE - A nice to have, but not a priority. It's okay if we never get to this.
+# TODO_OPTIMIZE - An opportunity for performance improvement if/when it's necessary
+# TODO_DISCUSS - Probably requires a lengthy offline discussion to understand next steps.
+# TODO_INCOMPLETE - A change which was out of scope of a specific PR but needed to be documented.
+# TODO_INVESTIGATE - TBD what was going on, but needed to continue moving and not get distracted.
+# TODO_CLEANUP - Like TECHDEBT, but not as bad. It's okay if we never get to this.
+# TODO_HACK - Like TECHDEBT, but much worse. This needs to be prioritized
+# TODO_REFACTOR - Similar to TECHDEBT, but will require a substantial rewrite and change across the codebase
+# TODO_CONSIDERATION - A comment that involves extra work but was thought about / considered as part of some implementation
+# TODO_CONSOLIDATE - We likely have similar implementations/types of the same thing, and we should consolidate them.
+# TODO_ADDTEST / TODO_TEST - Add more tests for a specific code section
+# TODO_FLAKY - Signals that the test is flaky and we are aware of it. Provide an explanation if you know why.
+# TODO_DEPRECATE - Code that should be removed in the future
+# TODO_RESEARCH - A non-trivial action item that requires deep research and investigation before next steps can be taken
+# TODO_DOCUMENT - A comment that involves the creation of a README or other documentation
+# TODO_BUG - There is a known existing bug in this code
+# TODO_NB - An important note to reference later
+# TODO_DISCUSS_IN_THIS_COMMIT - SHOULD NEVER BE COMMITTED TO MASTER. It is a way for the reviewer of a PR to start / reply to a discussion.
+# TODO_IN_THIS_COMMIT - SHOULD NEVER BE COMMITTED TO MASTER. It is a way to start the review process while non-critical changes are still in progress
+
+
+# Define shared variable for the exclude parameters
+EXCLUDE_GREP = --exclude-dir={.git,vendor,./docusaurus,.vscode,.idea} --exclude={Makefile,reviewdog.yml,*.pb.go,*.pulsar.go}
+
+.PHONY: todo_list
+todo_list: ## List all the TODOs in the project (excludes vendor and prototype directories)
+ grep -r $(EXCLUDE_GREP) TODO . | grep -v 'TODO()'
+
+.PHONY: todo_count
+todo_count: ## Print a count of all the TODOs in the project
+ grep -r $(EXCLUDE_GREP) TODO . | grep -v 'TODO()' | wc -l
+
+.PHONY: todo_this_commit
+todo_this_commit: ## List all the TODOs needed to be done in this commit
+ grep -r $(EXCLUDE_GREP) TODO_IN_THIS .| grep -v 'TODO()'
+
+
+####################
+### Gateways ###
+####################
+
+.PHONY: gateway_list
+gateway_list: ## List all the staked gateways
+ poktrolld --home=$(POKTROLLD_HOME) q gateway list-gateway --node $(POCKET_NODE)
+
+.PHONY: gateway_stake
+gateway_stake: ## Stake tokens for the gateway specified (must specify the GATEWAY and STAKE env vars)
+ poktrolld --home=$(POKTROLLD_HOME) tx gateway stake-gateway -y --config $(POKTROLLD_HOME)/config/$(STAKE) --keyring-backend test --from $(GATEWAY) --node $(POCKET_NODE)
+
+.PHONY: gateway1_stake
+gateway1_stake: ## Stake gateway1
+ GATEWAY=gateway1 STAKE=gateway1_stake_config.yaml make gateway_stake
+
+.PHONY: gateway2_stake
+gateway2_stake: ## Stake gateway2
+ GATEWAY=gateway2 STAKE=gateway2_stake_config.yaml make gateway_stake
+
+.PHONY: gateway3_stake
+gateway3_stake: ## Stake gateway3
+ GATEWAY=gateway3 STAKE=gateway3_stake_config.yaml make gateway_stake
+
+.PHONY: gateway_unstake
+gateway_unstake: ## Unstake a gateway (must specify the GATEWAY env var)
+ poktrolld --home=$(POKTROLLD_HOME) tx gateway unstake-gateway -y --keyring-backend test --from $(GATEWAY) --node $(POCKET_NODE)
+
+.PHONY: gateway1_unstake
+gateway1_unstake: ## Unstake gateway1
+ GATEWAY=gateway1 make gateway_unstake
+
+.PHONY: gateway2_unstake
+gateway2_unstake: ## Unstake gateway2
+ GATEWAY=gateway2 make gateway_unstake
+
+.PHONY: gateway3_unstake
+gateway3_unstake: ## Unstake gateway3
+ GATEWAY=gateway3 make gateway_unstake
+
+####################
+### Applications ###
+####################
+
+.PHONY: app_list
+app_list: ## List all the staked applications
+ poktrolld --home=$(POKTROLLD_HOME) q application list-application --node $(POCKET_NODE)
+
+.PHONY: app_stake
+app_stake: ## Stake tokens for the application specified (must specify the APP and SERVICES env vars)
+ poktrolld --home=$(POKTROLLD_HOME) tx application stake-application -y --config $(POKTROLLD_HOME)/config/$(SERVICES) --keyring-backend test --from $(APP) --node $(POCKET_NODE)
+
+.PHONY: app1_stake
+app1_stake: ## Stake app1 (also staked in genesis)
+ APP=app1 SERVICES=application1_stake_config.yaml make app_stake
+
+.PHONY: app2_stake
+app2_stake: ## Stake app2
+ APP=app2 SERVICES=application2_stake_config.yaml make app_stake
+
+.PHONY: app3_stake
+app3_stake: ## Stake app3
+ APP=app3 SERVICES=application3_stake_config.yaml make app_stake
+
+.PHONY: app_unstake
+app_unstake: ## Unstake an application (must specify the APP env var)
+ poktrolld --home=$(POKTROLLD_HOME) tx application unstake-application -y --keyring-backend test --from $(APP) --node $(POCKET_NODE)
+
+.PHONY: app1_unstake
+app1_unstake: ## Unstake app1
+ APP=app1 make app_unstake
+
+.PHONY: app2_unstake
+app2_unstake: ## Unstake app2
+ APP=app2 make app_unstake
+
+.PHONY: app3_unstake
+app3_unstake: ## Unstake app3
+ APP=app3 make app_unstake
+
+.PHONY: app_delegate
+app_delegate: ## Delegate trust to a gateway (must specify the APP and GATEWAY_ADDR env vars). Requires the app to be staked
+ poktrolld --home=$(POKTROLLD_HOME) tx application delegate-to-gateway $(GATEWAY_ADDR) --keyring-backend test --from $(APP) --node $(POCKET_NODE)
+
+.PHONY: app1_delegate_gateway1
+app1_delegate_gateway1: ## Delegate trust to gateway1
+ GATEWAY1=$$(make poktrolld_addr ACC_NAME=gateway1) && \
+ APP=app1 GATEWAY_ADDR=$$GATEWAY1 make app_delegate
+
+.PHONY: app2_delegate_gateway2
+app2_delegate_gateway2: ## Delegate trust to gateway2
+ GATEWAY2=$$(make poktrolld_addr ACC_NAME=gateway2) && \
+ APP=app2 GATEWAY_ADDR=$$GATEWAY2 make app_delegate
+
+.PHONY: app3_delegate_gateway3
+app3_delegate_gateway3: ## Delegate trust to gateway3
+ GATEWAY3=$$(make poktrolld_addr ACC_NAME=gateway3) && \
+ APP=app3 GATEWAY_ADDR=$$GATEWAY3 make app_delegate
+
+.PHONY: app_undelegate
+app_undelegate: ## Undelegate trust from a gateway (must specify the APP and GATEWAY_ADDR env vars). Requires the app to be staked
+ poktrolld --home=$(POKTROLLD_HOME) tx application undelegate-from-gateway $(GATEWAY_ADDR) --keyring-backend test --from $(APP) --node $(POCKET_NODE)
+
+.PHONY: app1_undelegate_gateway1
+app1_undelegate_gateway1: ## Undelegate trust from gateway1
+ GATEWAY1=$$(make poktrolld_addr ACC_NAME=gateway1) && \
+ APP=app1 GATEWAY_ADDR=$$GATEWAY1 make app_undelegate
+
+.PHONY: app2_undelegate_gateway2
+app2_undelegate_gateway2: ## Undelegate trust from gateway2
+ GATEWAY2=$$(make poktrolld_addr ACC_NAME=gateway2) && \
+ APP=app2 GATEWAY_ADDR=$$GATEWAY2 make app_undelegate
+
+.PHONY: app3_undelegate_gateway3
+app3_undelegate_gateway3: ## Undelegate trust from gateway3
+ GATEWAY3=$$(make poktrolld_addr ACC_NAME=gateway3) && \
+ APP=app3 GATEWAY_ADDR=$$GATEWAY3 make app_undelegate
+
+#################
+### Suppliers ###
+#################
+
+.PHONY: supplier_list
+supplier_list: ## List all the staked suppliers
+ poktrolld --home=$(POKTROLLD_HOME) q supplier list-supplier --node $(POCKET_NODE)
+
+.PHONY: supplier_stake
+supplier_stake: ## Stake tokens for the supplier specified (must specify the SUPPLIER and SERVICES env vars)
+ poktrolld --home=$(POKTROLLD_HOME) tx supplier stake-supplier -y --config $(POKTROLLD_HOME)/config/$(SERVICES) --keyring-backend test --from $(SUPPLIER) --node $(POCKET_NODE)
+
+.PHONY: supplier1_stake
+supplier1_stake: ## Stake supplier1 (also staked in genesis)
+ SUPPLIER=supplier1 SERVICES=supplier1_stake_config.yaml make supplier_stake
+
+.PHONY: supplier2_stake
+supplier2_stake: ## Stake supplier2
+ SUPPLIER=supplier2 SERVICES=supplier2_stake_config.yaml make supplier_stake
+
+.PHONY: supplier3_stake
+supplier3_stake: ## Stake supplier3
+ SUPPLIER=supplier3 SERVICES=supplier3_stake_config.yaml make supplier_stake
+
+.PHONY: supplier_unstake
+supplier_unstake: ## Unstake a supplier (must specify the SUPPLIER env var)
+ poktrolld --home=$(POKTROLLD_HOME) tx supplier unstake-supplier $(SUPPLIER) --keyring-backend test --from $(SUPPLIER) --node $(POCKET_NODE)
+
+.PHONY: supplier1_unstake
+supplier1_unstake: ## Unstake supplier1
+ SUPPLIER=supplier1 make supplier_unstake
+
+.PHONY: supplier2_unstake
+supplier2_unstake: ## Unstake supplier2
+ SUPPLIER=supplier2 make supplier_unstake
+
+.PHONY: supplier3_unstake
+supplier3_unstake: ## Unstake supplier3
+ SUPPLIER=supplier3 make supplier_unstake
+
+###############
+### Session ###
+###############
+
+.PHONY: get_session
+get_session: ## Retrieve the session given the following env vars: (APP, SVC, HEIGHT)
+ poktrolld --home=$(POKTROLLD_HOME) q session get-session $(APP) $(SVC) $(HEIGHT) --node $(POCKET_NODE)
+
+.PHONY: get_session_app1_anvil
+get_session_app1_anvil: ## Retrieve the session for (app1, anvil, latest_height)
+ APP1=$$(make poktrolld_addr ACC_NAME=app1) && \
+ APP=$$APP1 SVC=anvil HEIGHT=0 make get_session
+
+.PHONY: get_session_app2_anvil
+get_session_app2_anvil: ## Retrieve the session for (app2, anvil, latest_height)
+ APP2=$$(make poktrolld_addr ACC_NAME=app2) && \
+ APP=$$APP2 SVC=anvil HEIGHT=0 make get_session
+
+.PHONY: get_session_app3_anvil
+get_session_app3_anvil: ## Retrieve the session for (app3, anvil, latest_height)
+ APP3=$$(make poktrolld_addr ACC_NAME=app3) && \
+ APP=$$APP3 SVC=anvil HEIGHT=0 make get_session
+
+###############
+### TestNet ###
+###############
+
+.PHONY: testnet_supplier_list
+testnet_supplier_list: ## List all the staked suppliers on TestNet
+ poktrolld q supplier list-supplier --node=$(TESTNET_RPC)
+
+.PHONY: testnet_gateway_list
+testnet_gateway_list: ## List all the staked gateways on TestNet
+ poktrolld q gateway list-gateway --node=$(TESTNET_RPC)
+
+.PHONY: testnet_app_list
+testnet_app_list: ## List all the staked applications on TestNet
+ poktrolld q application list-application --node=$(TESTNET_RPC)
+
+.PHONY: testnet_consensus_params
+testnet_consensus_params: ## Output consensus parameters
+ poktrolld q consensus params --node=$(TESTNET_RPC)
+
+.PHONY: testnet_gov_params
+testnet_gov_params: ## Output gov parameters
+ poktrolld q gov params --node=$(TESTNET_RPC)
+
+.PHONY: testnet_status
+testnet_status: ## Output status of the RPC node (most likely a validator)
+ poktrolld status --node=$(TESTNET_RPC) | jq
+
+.PHONY: testnet_height
+testnet_height: ## Height of the network from the RPC node's point of view
+ poktrolld status --node=$(TESTNET_RPC) | jq ".sync_info.latest_block_height"
+
################
### Accounts ###
################
@@ -271,6 +834,161 @@ acc_initialize_pubkeys: ## Make sure the account keeper has public keys for all
--home=$(POKTROLLD_HOME) \
--node $(POCKET_NODE);)
+########################
+### Warning Messages ###
+########################
+
+.PHONY: warn_message_acc_initialize_pubkeys
+warn_message_acc_initialize_pubkeys: ## Print a warning message about the need to run `make acc_initialize_pubkeys`
+ @echo "+----------------------------------------------------------------------------------+"
+ @echo "| |"
+ @echo "| IMPORTANT: Please run the following command once to initialize |"
+ @echo "| E2E tests after the network has started: |"
+ @echo "| |"
+ @echo "| make acc_initialize_pubkeys |"
+ @echo "| |"
+ @echo "+----------------------------------------------------------------------------------+"
+
+.PHONY: warn_message_local_stress_test
+warn_message_local_stress_test: ## Print a warning message when kicking off a local E2E relay stress test
+ @echo "+-----------------------------------------------------------------------------------------------+"
+ @echo "| |"
+ @echo "| IMPORTANT: Please read the following before continuing with the stress test. |"
+ @echo "| |"
+	@echo "| 1. Review the # of suppliers & gateways in 'load-testing/loadtest_manifest_localnet.yaml' |"
+ @echo "| 2. Update 'localnet_config.yaml' to reflect what you found in (1) |"
+ @echo "| DEVELOPER_TIP: If you're operating off defaults, you'll likely need to update to 3 |"
+ @echo "| |"
+ @echo "| TODO_DOCUMENT(@okdas): Move this into proper documentation w/ clearer explanations |"
+ @echo "| |"
+ @echo "+-----------------------------------------------------------------------------------------------+"
+
+.PHONY: warn_flaky_tests
+warn_flaky_tests: ## Print a warning message that some unit tests may be flaky
+ @echo "+-----------------------------------------------------------------------------------------------+"
+ @echo "| |"
+ @echo "| IMPORTANT: READ ME IF YOUR TESTS FAIL!!! |"
+ @echo "| |"
+ @echo "| 1. Our unit / integration tests are far from perfect & some are flaky |"
+ @echo "| 2. If you ran 'make go_develop_and_test' and a failure occurred, try to run: |"
+ @echo "| 'make test_all' once or twice more |"
+	@echo "| 3. If the same error persists, isolate it with 'go test -v ./path/to/failing/module' |"
+ @echo "| |"
+ @echo "+-----------------------------------------------------------------------------------------------+"
+
+##############
+### Claims ###
+##############
+
+# These encoded values were generated using the `encodeSessionHeader` helpers in `query_claim_test.go` as dummy values.
+ENCODED_SESSION_HEADER = "eyJhcHBsaWNhdGlvbl9hZGRyZXNzIjoicG9rdDFleXJuNDUwa3JoZnpycmVyemd0djd2c3J4bDA5NDN0dXN4azRhayIsInNlcnZpY2UiOnsiaWQiOiJhbnZpbCIsIm5hbWUiOiIifSwic2Vzc2lvbl9zdGFydF9ibG9ja19oZWlnaHQiOiI1Iiwic2Vzc2lvbl9pZCI6InNlc3Npb25faWQxIiwic2Vzc2lvbl9lbmRfYmxvY2tfaGVpZ2h0IjoiOSJ9"
+ENCODED_ROOT_HASH = "cm9vdF9oYXNo"
+.PHONY: claim_create_dummy
+claim_create_dummy: ## Create a dummy claim by supplier1
+ poktrolld --home=$(POKTROLLD_HOME) tx supplier create-claim \
+ $(ENCODED_SESSION_HEADER) \
+ $(ENCODED_ROOT_HASH) \
+ --from supplier1 --node $(POCKET_NODE)
+
+.PHONY: claim_list
+claim_list: ## List all the claims
+ poktrolld --home=$(POKTROLLD_HOME) q supplier list-claims --node $(POCKET_NODE)
+
+.PHONY: claim_list_address
+claim_list_address: ## List all the claims for a specific address (specified via ADDR variable)
+ poktrolld --home=$(POKTROLLD_HOME) q supplier list-claims --supplier-operator-address $(ADDR) --node $(POCKET_NODE)
+
+.PHONY: claim_list_address_supplier1
+claim_list_address_supplier1: ## List all the claims for supplier1
+ SUPPLIER1=$$(make poktrolld_addr ACC_NAME=supplier1) && \
+ ADDR=$$SUPPLIER1 make claim_list_address
+
+.PHONY: claim_list_height
+claim_list_height: ## List all the claims ending at a specific height (specified via HEIGHT variable)
+ poktrolld --home=$(POKTROLLD_HOME) q supplier list-claims --session-end-height $(HEIGHT) --node $(POCKET_NODE)
+
+.PHONY: claim_list_height_5
+claim_list_height_5: ## List all the claims at height 5
+ HEIGHT=5 make claim_list_height
+
+.PHONY: claim_list_session
+claim_list_session: ## List all the claims ending at a specific session (specified via SESSION variable)
+ poktrolld --home=$(POKTROLLD_HOME) q supplier list-claims --session-id $(SESSION) --node $(POCKET_NODE)
+
+##############
+### Params ###
+##############
+
+# TODO_CONSIDERATION: additional factoring (e.g. POKTROLLD_FLAGS).
+PARAM_FLAGS = --home=$(POKTROLLD_HOME) --keyring-backend test --from $(PNF_ADDRESS) --node $(POCKET_NODE)
+
+### Tokenomics Module Params ###
+.PHONY: params_update_tokenomics_all
+params_update_tokenomics_all: ## Update the tokenomics module params
+ poktrolld tx authz exec ./tools/scripts/params/tokenomics_all.json $(PARAM_FLAGS)
+
+.PHONY: params_update_tokenomics_compute_units_to_tokens_multiplier
+params_update_tokenomics_compute_units_to_tokens_multiplier: ## Update the tokenomics module compute_units_to_tokens_multiplier param
+ poktrolld tx authz exec ./tools/scripts/params/tokenomics_compute_units_to_tokens_multiplier.json $(PARAM_FLAGS)
+
+### Proof Module Params ###
+.PHONY: params_update_proof_all
+params_update_proof_all: ## Update the proof module params
+ poktrolld tx authz exec ./tools/scripts/params/proof_all.json $(PARAM_FLAGS)
+
+.PHONY: params_update_proof_min_relay_difficulty_bits
+params_update_proof_min_relay_difficulty_bits: ## Update the proof module min_relay_difficulty_bits param
+ poktrolld tx authz exec ./tools/scripts/params/proof_min_relay_difficulty_bits.json $(PARAM_FLAGS)
+
+.PHONY: params_update_proof_proof_request_probability
+params_update_proof_proof_request_probability: ## Update the proof module proof_request_probability param
+ poktrolld tx authz exec ./tools/scripts/params/proof_proof_request_probability.json $(PARAM_FLAGS)
+
+.PHONY: params_update_proof_proof_requirement_threshold
+params_update_proof_proof_requirement_threshold: ## Update the proof module proof_requirement_threshold param
+ poktrolld tx authz exec ./tools/scripts/params/proof_proof_requirement_threshold.json $(PARAM_FLAGS)
+
+.PHONY: params_update_proof_proof_missing_penalty
+params_update_proof_proof_missing_penalty: ## Update the proof module proof_missing_penalty param
+ poktrolld tx authz exec ./tools/scripts/params/proof_proof_missing_penalty.json $(PARAM_FLAGS)
+
+### Shared Module Params ###
+.PHONY: params_update_shared_all
+params_update_shared_all: ## Update the shared module params
+ poktrolld tx authz exec ./tools/scripts/params/shared_all.json $(PARAM_FLAGS)
+
+.PHONY: params_update_shared_num_blocks_per_session
+params_update_shared_num_blocks_per_session: ## Update the shared module num_blocks_per_session param
+ poktrolld tx authz exec ./tools/scripts/params/shared_num_blocks_per_session.json $(PARAM_FLAGS)
+
+.PHONY: params_update_shared_grace_period_end_offset_blocks
+params_update_shared_grace_period_end_offset_blocks: ## Update the shared module grace_period_end_offset_blocks param
+ poktrolld tx authz exec ./tools/scripts/params/shared_grace_period_end_offset_blocks.json $(PARAM_FLAGS)
+
+.PHONY: params_update_shared_claim_window_open_offset_blocks
+params_update_shared_claim_window_open_offset_blocks: ## Update the shared module claim_window_open_offset_blocks param
+ poktrolld tx authz exec ./tools/scripts/params/shared_claim_window_open_offset_blocks.json $(PARAM_FLAGS)
+
+.PHONY: params_update_shared_claim_window_close_offset_blocks
+params_update_shared_claim_window_close_offset_blocks: ## Update the shared module claim_window_close_offset_blocks param
+ poktrolld tx authz exec ./tools/scripts/params/shared_claim_window_close_offset_blocks.json $(PARAM_FLAGS)
+
+.PHONY: params_update_shared_proof_window_open_offset_blocks
+params_update_shared_proof_window_open_offset_blocks: ## Update the shared module proof_window_open_offset_blocks param
+ poktrolld tx authz exec ./tools/scripts/params/shared_proof_window_open_offset_blocks.json $(PARAM_FLAGS)
+
+.PHONY: params_update_shared_proof_window_close_offset_blocks
+params_update_shared_proof_window_close_offset_blocks: ## Update the shared module proof_window_close_offset_blocks param
+ poktrolld tx authz exec ./tools/scripts/params/shared_proof_window_close_offset_blocks.json $(PARAM_FLAGS)
+
+.PHONY: params_query_all
+params_query_all: check_jq ## Query the params from all available modules
+ @for module in $(MODULES); do \
+ echo "~~~ Querying $$module module params ~~~"; \
+ poktrolld query $$module params --node $(POCKET_NODE) --output json | jq; \
+ echo ""; \
+ done
+
######################
### Ignite Helpers ###
######################
@@ -385,36 +1103,6 @@ act_reviewdog: check_act check_gh ## Run the reviewdog workflow locally like so:
@echo "Detected architecture: $(CONTAINER_ARCH)"
act -v -s GITHUB_TOKEN=$(GITHUB_TOKEN) -W .github/workflows/reviewdog.yml --container-architecture $(CONTAINER_ARCH)
-
-###########################
-### Release Helpers ###
-###########################
-
-# List tags: git tag
-# Delete tag locally: git tag -d v1.2.3
-# Delete tag remotely: git push --delete origin v1.2.3
-
-.PHONY: release_tag_bug_fix
-release_tag_bug_fix: ## Tag a new bug fix release (e.g. v1.0.1 -> v1.0.2)
- @$(eval LATEST_TAG=$(shell git tag --sort=-v:refname | head -n 1))
- @$(eval NEW_TAG=$(shell echo $(LATEST_TAG) | awk -F. -v OFS=. '{ $$NF = sprintf("%d", $$NF + 1); print }'))
- @git tag $(NEW_TAG)
- @echo "New bug fix version tagged: $(NEW_TAG)"
- @echo "Run the following commands to push the new tag:"
- @echo " git push origin $(NEW_TAG)"
- @echo "And draft a new release at https://github.com/pokt-network/poktroll/releases/new"
-
-
-.PHONY: release_tag_minor_release
-release_tag_minor_release: ## Tag a new minor release (e.g. v1.0.0 -> v1.1.0)
- @$(eval LATEST_TAG=$(shell git tag --sort=-v:refname | head -n 1))
- @$(eval NEW_TAG=$(shell echo $(LATEST_TAG) | awk -F. '{$$2 += 1; $$3 = 0; print $$1 "." $$2 "." $$3}'))
- @git tag $(NEW_TAG)
- @echo "New minor release version tagged: $(NEW_TAG)"
- @echo "Run the following commands to push the new tag:"
- @echo " git push origin $(NEW_TAG)"
- @echo "And draft a new release at https://github.com/pokt-network/poktroll/releases/new"
-
#############################
### Grove Gateway Helpers ###
#############################
@@ -433,20 +1121,3 @@ grove_staging_eth_block_height: ## Sends a relay through the staging grove gatew
%:
@echo "Error: target '$@' not found."
@exit 1
-
-###############
-### Imports ###
-###############
-include ./makefiles/warnings.mk
-include ./makefiles/todos.mk
-include ./makefiles/checks.mk
-include ./makefiles/tests.mk
-include ./makefiles/localnet.mk
-include ./makefiles/query.mk
-include ./makefiles/testnet.mk
-include ./makefiles/params.mk
-include ./makefiles/applications.mk
-include ./makefiles/suppliers.mk
-include ./makefiles/gateways.mk
-include ./makefiles/session.mk
-include ./makefiles/claims.mk
diff --git a/Tiltfile b/Tiltfile
index 8c339de7f..f04d457e9 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -224,16 +224,28 @@ helm_resource(
actor_number = 0
for x in range(localnet_config["relayminers"]["count"]):
actor_number = actor_number + 1
- helm_resource(
- "relayminer" + str(actor_number),
- chart_prefix + "relayminer",
- flags=[
+
+ flags = [
"--values=./localnet/kubernetes/values-common.yaml",
"--values=./localnet/kubernetes/values-relayminer-common.yaml",
"--values=./localnet/kubernetes/values-relayminer-" + str(actor_number) + ".yaml",
"--set=metrics.serviceMonitor.enabled=" + str(localnet_config["observability"]["enabled"]),
"--set=development.delve.enabled=" + str(localnet_config["relayminers"]["delve"]["enabled"]),
- ],
+ ]
+
+ if localnet_config["rest"]["enabled"]:
+ flags.append("--values=./localnet/kubernetes/values-relayminer-" + str(actor_number) + "-rest" + ".yaml")
+
+ if localnet_config["ollama"]["enabled"]:
+ flags.append("--values=./localnet/kubernetes/values-relayminer-" + str(actor_number) + "-ollama" + ".yaml")
+
+ if localnet_config["rest"]["enabled"] and localnet_config["ollama"]["enabled"]:
+ flags.append("--values=./localnet/kubernetes/values-relayminer-" + str(actor_number) + "-all" + ".yaml")
+
+ helm_resource(
+ "relayminer" + str(actor_number),
+ chart_prefix + "relayminer",
+ flags=flags,
image_deps=["poktrolld"],
image_keys=[("image.repository", "image.tag")],
)
@@ -257,6 +269,7 @@ for x in range(localnet_config["relayminers"]["count"]):
# Use with pprof like this: `go tool pprof -http=:3333 http://localhost:6070/debug/pprof/goroutine`
str(6069 + actor_number)
+ ":6060", # Relayminer pprof port. relayminer1 - exposes 6070, relayminer2 exposes 6071, etc.
+ str(7000 + actor_number) + ":8081", # Relayminer ping port. relayminer1 - exposes 7001, relayminer2 exposes 7002, etc.
],
)
diff --git a/docusaurus/docs/operate/configs/relayminer_config.md b/docusaurus/docs/operate/configs/relayminer_config.md
index 8931d03c2..d6a30d711 100644
--- a/docusaurus/docs/operate/configs/relayminer_config.md
+++ b/docusaurus/docs/operate/configs/relayminer_config.md
@@ -23,6 +23,7 @@ You can find a fully featured example configuration at [relayminer_config_full_e
- [`smt_store_path`](#smt_store_path)
- [`metrics`](#metrics)
- [`pprof`](#pprof)
+ - [`ping`](#ping)
- [Pocket node connectivity](#pocket-node-connectivity)
- [`query_node_rpc_url`](#query_node_rpc_url)
- [`query_node_grpc_url`](#query_node_grpc_url)
@@ -173,6 +174,21 @@ pprof:
You can learn how to use that endpoint on the [Performance Troubleshooting](../../develop/developer_guide/performance_troubleshooting.md) page.
+### `ping`
+
+Configures a `ping` server that tests the connectivity of every backend URL. If
+all the backend URLs are reachable, the endpoint returns a 200 HTTP status
+code. Otherwise, if one or more backend URLs are unreachable, the server
+returns a 500 HTTP Internal Server Error.
+
+Example configuration:
+
+```yaml
+ping:
+ enabled: true
+ addr: localhost:8081
+```
+
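+With the example configuration above, a minimal way to exercise the endpoint
+locally (assuming the relay miner is running on the same host) is:
+
+```bash
+# Returns 200 if every configured backend URL is reachable, 500 otherwise.
+curl -i localhost:8081
+```
+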
## Pocket node connectivity
```yaml
diff --git a/localnet/kubernetes/values-relayminer-1-all.yaml b/localnet/kubernetes/values-relayminer-1-all.yaml
new file mode 100644
index 000000000..4b5bb730e
--- /dev/null
+++ b/localnet/kubernetes/values-relayminer-1-all.yaml
@@ -0,0 +1,20 @@
+config:
+ suppliers:
+ - service_id: anvil
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://anvil:8547/
+ publicly_exposed_endpoints:
+ - relayminer1
+ - service_id: rest
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://rest:10000/
+ publicly_exposed_endpoints:
+ - relayminer1
+ - service_id: ollama
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://ollama:11434/
+ publicly_exposed_endpoints:
+ - relayminer1
diff --git a/localnet/kubernetes/values-relayminer-1-ollama.yaml b/localnet/kubernetes/values-relayminer-1-ollama.yaml
new file mode 100644
index 000000000..eb90af154
--- /dev/null
+++ b/localnet/kubernetes/values-relayminer-1-ollama.yaml
@@ -0,0 +1,14 @@
+config:
+ suppliers:
+ - service_id: anvil
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://anvil:8547/
+ publicly_exposed_endpoints:
+ - relayminer1
+ - service_id: ollama
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://ollama:11434/
+ publicly_exposed_endpoints:
+ - relayminer1
diff --git a/localnet/kubernetes/values-relayminer-1-rest.yaml b/localnet/kubernetes/values-relayminer-1-rest.yaml
new file mode 100644
index 000000000..ca68d24ea
--- /dev/null
+++ b/localnet/kubernetes/values-relayminer-1-rest.yaml
@@ -0,0 +1,14 @@
+config:
+ suppliers:
+ - service_id: anvil
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://anvil:8547/
+ publicly_exposed_endpoints:
+ - relayminer1
+ - service_id: rest
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://rest:10000/
+ publicly_exposed_endpoints:
+ - relayminer1
diff --git a/localnet/kubernetes/values-relayminer-1.yaml b/localnet/kubernetes/values-relayminer-1.yaml
index b17bd5765..35cc0240e 100644
--- a/localnet/kubernetes/values-relayminer-1.yaml
+++ b/localnet/kubernetes/values-relayminer-1.yaml
@@ -11,15 +11,3 @@ config:
backend_url: http://anvil:8547/
publicly_exposed_endpoints:
- relayminer1
- - service_id: ollama
- listen_url: http://0.0.0.0:8545
- service_config:
- backend_url: http://ollama:11434/
- publicly_exposed_endpoints:
- - relayminer1
- - service_id: rest
- listen_url: http://0.0.0.0:8545
- service_config:
- backend_url: http://rest:10000/
- publicly_exposed_endpoints:
- - relayminer1
diff --git a/localnet/kubernetes/values-relayminer-2-ollama.yaml b/localnet/kubernetes/values-relayminer-2-ollama.yaml
new file mode 100644
index 000000000..21957cd08
--- /dev/null
+++ b/localnet/kubernetes/values-relayminer-2-ollama.yaml
@@ -0,0 +1,14 @@
+config:
+ suppliers:
+ - service_id: anvil
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://anvil:8547/
+ publicly_exposed_endpoints:
+ - relayminer2
+ - service_id: ollama
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://ollama:11434/
+ publicly_exposed_endpoints:
+ - relayminer2
diff --git a/localnet/kubernetes/values-relayminer-2.yaml b/localnet/kubernetes/values-relayminer-2.yaml
index de12138d4..2110bc000 100644
--- a/localnet/kubernetes/values-relayminer-2.yaml
+++ b/localnet/kubernetes/values-relayminer-2.yaml
@@ -11,9 +11,3 @@ config:
backend_url: http://anvil:8547/
publicly_exposed_endpoints:
- relayminer2
- - service_id: ollama
- listen_url: http://0.0.0.0:8545
- service_config:
- backend_url: http://ollama:11434/
- publicly_exposed_endpoints:
- - relayminer2
diff --git a/localnet/kubernetes/values-relayminer-3-ollama.yaml b/localnet/kubernetes/values-relayminer-3-ollama.yaml
new file mode 100644
index 000000000..f585f0066
--- /dev/null
+++ b/localnet/kubernetes/values-relayminer-3-ollama.yaml
@@ -0,0 +1,14 @@
+config:
+ suppliers:
+ - service_id: anvil
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://anvil:8547/
+ publicly_exposed_endpoints:
+ - relayminer3
+ - service_id: ollama
+ listen_url: http://0.0.0.0:8545
+ service_config:
+ backend_url: http://ollama:11434/
+ publicly_exposed_endpoints:
+ - relayminer3
diff --git a/localnet/kubernetes/values-relayminer-3.yaml b/localnet/kubernetes/values-relayminer-3.yaml
index 624aaa1bf..5a9cf0ec8 100644
--- a/localnet/kubernetes/values-relayminer-3.yaml
+++ b/localnet/kubernetes/values-relayminer-3.yaml
@@ -11,9 +11,3 @@ config:
backend_url: http://anvil:8547/
publicly_exposed_endpoints:
- relayminer3
- - service_id: ollama
- listen_url: http://0.0.0.0:8545
- service_config:
- backend_url: http://ollama:11434/
- publicly_exposed_endpoints:
- - relayminer3
diff --git a/localnet/kubernetes/values-relayminer-common.yaml b/localnet/kubernetes/values-relayminer-common.yaml
index 207c636b2..f4fbe4b5b 100644
--- a/localnet/kubernetes/values-relayminer-common.yaml
+++ b/localnet/kubernetes/values-relayminer-common.yaml
@@ -11,3 +11,6 @@ config:
pprof:
enabled: true
addr: localhost:6060
+ ping:
+ enabled: true
+ addr: localhost:8081
diff --git a/localnet/poktrolld/config/relayminer_config.yaml b/localnet/poktrolld/config/relayminer_config.yaml
index 1b9895122..71d71cf94 100644
--- a/localnet/poktrolld/config/relayminer_config.yaml
+++ b/localnet/poktrolld/config/relayminer_config.yaml
@@ -17,3 +17,6 @@ suppliers:
pprof:
enabled: false
addr: localhost:6060
+ping:
+ enabled: false
+ addr: localhost:8082
diff --git a/localnet/poktrolld/config/relayminer_config_full_example.yaml b/localnet/poktrolld/config/relayminer_config_full_example.yaml
index 7196024fd..5cf0c4f10 100644
--- a/localnet/poktrolld/config/relayminer_config_full_example.yaml
+++ b/localnet/poktrolld/config/relayminer_config_full_example.yaml
@@ -18,6 +18,12 @@ pprof:
enabled: false
addr: localhost:6060
+# Ping server configuration to test the connectivity of every
+# suppliers.[].service_config.backend_url
+ping:
+ enabled: false
+ addr: localhost:8081
+
pocket_node:
# Pocket node URL exposing the CometBFT JSON-RPC API.
# Used by the Cosmos client SDK, event subscriptions, etc.
diff --git a/localnet/poktrolld/config/relayminer_config_localnet_vscode.yaml b/localnet/poktrolld/config/relayminer_config_localnet_vscode.yaml
index 74ba44dde..995d6fa89 100644
--- a/localnet/poktrolld/config/relayminer_config_localnet_vscode.yaml
+++ b/localnet/poktrolld/config/relayminer_config_localnet_vscode.yaml
@@ -41,3 +41,6 @@ suppliers:
pprof:
enabled: false
addr: localhost:6070
+ping:
+ enabled: false
+ addr: localhost:8081
diff --git a/pkg/relayer/cmd/cmd.go b/pkg/relayer/cmd/cmd.go
index 66681b881..ac455d72a 100644
--- a/pkg/relayer/cmd/cmd.go
+++ b/pkg/relayer/cmd/cmd.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "net"
"net/http"
"net/url"
"os"
@@ -134,6 +135,17 @@ func runRelayer(cmd *cobra.Command, _ []string) error {
}
}
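+	// If the ping server is enabled, bind its listener and start serving it before the relay miner starts.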
+ if relayMinerConfig.Ping.Enabled {
+ ln, err := net.Listen("tcp", relayMinerConfig.Ping.Addr)
+ if err != nil {
+ return fmt.Errorf("failed to listen on ping server addr: %w", err)
+ }
+
+ if err := relayMiner.ServePing(ctx, ln); err != nil {
+ return fmt.Errorf("failed to start ping server: %w", err)
+ }
+ }
+
// Start the relay miner
logger.Info().Msg("Starting relay miner...")
if err := relayMiner.Start(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) {
diff --git a/pkg/relayer/config/relayminer_configs_reader.go b/pkg/relayer/config/relayminer_configs_reader.go
index 8d90344eb..2f59c53ee 100644
--- a/pkg/relayer/config/relayminer_configs_reader.go
+++ b/pkg/relayer/config/relayminer_configs_reader.go
@@ -42,6 +42,11 @@ func ParseRelayMinerConfigs(configContent []byte) (*RelayMinerConfig, error) {
Addr: yamlRelayMinerConfig.Pprof.Addr,
}
+ relayMinerConfig.Ping = &RelayMinerPingConfig{
+ Enabled: yamlRelayMinerConfig.Ping.Enabled,
+ Addr: yamlRelayMinerConfig.Ping.Addr,
+ }
+
// Hydrate the pocket node urls
if err := relayMinerConfig.HydratePocketNodeUrls(&yamlRelayMinerConfig.PocketNode); err != nil {
return nil, err
diff --git a/pkg/relayer/config/types.go b/pkg/relayer/config/types.go
index ba1411b14..35512fbcb 100644
--- a/pkg/relayer/config/types.go
+++ b/pkg/relayer/config/types.go
@@ -24,6 +24,13 @@ type YAMLRelayMinerConfig struct {
Pprof YAMLRelayMinerPprofConfig `yaml:"pprof"`
SmtStorePath string `yaml:"smt_store_path"`
Suppliers []YAMLRelayMinerSupplierConfig `yaml:"suppliers"`
+ Ping YAMLRelayMinerPingConfig `yaml:"ping"`
+}
+
+// YAMLRelayMinerPingConfig represents the configuration to expose a ping server.
+type YAMLRelayMinerPingConfig struct {
+ Enabled bool `yaml:"enabled"`
+ Addr string `yaml:"addr"`
}
// YAMLRelayMinerPocketNodeConfig is the structure used to unmarshal the pocket
@@ -83,6 +90,14 @@ type RelayMinerConfig struct {
Pprof *RelayMinerPprofConfig
Servers map[string]*RelayMinerServerConfig
SmtStorePath string
+ Ping *RelayMinerPingConfig
+}
+
+// RelayMinerPingConfig is the structure resulting from parsing the ping
+// server configuration.
+type RelayMinerPingConfig struct {
+ Enabled bool
+ Addr string
}
// RelayMinerPocketNodeConfig is the structure resulting from parsing the pocket
diff --git a/pkg/relayer/interface.go b/pkg/relayer/interface.go
index 1f231e7f1..d9aabb70d 100644
--- a/pkg/relayer/interface.go
+++ b/pkg/relayer/interface.go
@@ -72,6 +72,9 @@ type RelayerProxy interface {
// TODO_TECHDEBT(@red-0ne): This method should be moved out of the RelayerProxy interface
// that should not be responsible for signing relay responses.
SignRelayResponse(relayResponse *servicetypes.RelayResponse, supplierOperatorAddr string) error
+
+ // PingAll tests the connectivity between all the managed relay servers and their respective backend URLs.
+ PingAll(ctx context.Context) []error
}
type RelayerProxyOption func(RelayerProxy)
@@ -83,8 +86,14 @@ type RelayServer interface {
// Stop terminates the service server and returns an error if it fails.
Stop(ctx context.Context) error
+
+ // Ping tests the connection between the relay server and its backend URL.
+ Ping(ctx context.Context) error
}
+// RelayServers aggregates a slice of RelayServer interfaces.
+type RelayServers []RelayServer
+
// RelayerSessionsManager is responsible for managing the relayer's session lifecycles.
// It handles the creation and retrieval of SMSTs (trees) for a given session, as
// well as the respective and subsequent claim creation and proof submission.
diff --git a/pkg/relayer/proxy/proxy.go b/pkg/relayer/proxy/proxy.go
index 0db1016f9..0d09775e3 100644
--- a/pkg/relayer/proxy/proxy.go
+++ b/pkg/relayer/proxy/proxy.go
@@ -139,7 +139,6 @@ func (rp *relayerProxy) Start(ctx context.Context) error {
if err := rp.BuildProvidedServices(ctx); err != nil {
return err
}
-
// Start the ring cache.
rp.ringCache.Start(ctx)
@@ -147,6 +146,13 @@ func (rp *relayerProxy) Start(ctx context.Context) error {
for _, relayServer := range rp.servers {
server := relayServer // create a new variable scoped to the anonymous function
+
+ // Ensure that each backing data node responds to a ping request
+ // (at least) before continuing operation.
+ if err := server.Ping(ctx); err != nil {
+ return err
+ }
+
startGroup.Go(func() error { return server.Start(ctx) })
}
@@ -187,3 +193,22 @@ func (rp *relayerProxy) validateConfig() error {
return nil
}
+
+// PingAll tests the connectivity between all the managed relay servers and their respective backend URLs.
+func (rp *relayerProxy) PingAll(ctx context.Context) []error {
+ var errs []error
+
+ for _, srv := range rp.servers {
+ if err := srv.Ping(ctx); err != nil {
+ rp.logger.Error().Err(err).
+ Msg("an unexpected error occurred while pinging backend URL")
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) > 0 {
+ return errs
+ }
+
+ return nil
+}
diff --git a/pkg/relayer/proxy/proxy_test.go b/pkg/relayer/proxy/proxy_test.go
index b4481e8d8..3ed7d1134 100644
--- a/pkg/relayer/proxy/proxy_test.go
+++ b/pkg/relayer/proxy/proxy_test.go
@@ -152,6 +152,11 @@ func TestRelayerProxy_StartAndStop(t *testing.T) {
// Block so relayerProxy has sufficient time to start
time.Sleep(100 * time.Millisecond)
+ errs := rp.PingAll(ctx)
+ for _, err := range errs {
+ require.NoError(t, err)
+ }
+
// Test that RelayerProxy is handling requests (ignoring the actual response content)
res, err := http.DefaultClient.Get(fmt.Sprintf("http://%s/", servicesConfigMap[defaultRelayMinerServer].ListenAddress))
require.NoError(t, err)
diff --git a/pkg/relayer/proxy/synchronous.go b/pkg/relayer/proxy/synchronous.go
index e3767eef6..b386529f9 100644
--- a/pkg/relayer/proxy/synchronous.go
+++ b/pkg/relayer/proxy/synchronous.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"crypto/tls"
+ "errors"
"fmt"
"io"
"net/http"
@@ -91,6 +92,25 @@ func (sync *synchronousRPCServer) Stop(ctx context.Context) error {
return sync.server.Shutdown(ctx)
}
+// Ping dials each supplier's backend URL to test the connection.
+func (sync *synchronousRPCServer) Ping(ctx context.Context) error {
+ for _, supplierCfg := range sync.serverConfig.SupplierConfigsMap {
+ c := &http.Client{Timeout: 2 * time.Second}
+
+ resp, err := c.Head(supplierCfg.ServiceConfig.BackendUrl.String())
+ if err != nil {
+ return err
+ }
+ _ = resp.Body.Close()
+
+ if resp.StatusCode >= http.StatusInternalServerError {
+ return errors.New("ping failed for backend URL " + supplierCfg.ServiceConfig.BackendUrl.String())
+ }
+ }
+
+ return nil
+}
+
// ServeHTTP listens for incoming relay requests. It implements the respective
// method of the http.Handler interface. It is called by http.ListenAndServe()
// when synchronousRPCServer is used as an http.Handler with an http.Server.
diff --git a/pkg/relayer/relayminer.go b/pkg/relayer/relayminer.go
index a81e2d982..dbeed0dde 100644
--- a/pkg/relayer/relayminer.go
+++ b/pkg/relayer/relayminer.go
@@ -134,3 +134,28 @@ func (rel *relayMiner) ServePprof(ctx context.Context, addr string) error {
return nil
}
+
+// ServePing exposes a ping HTTP server to check the reachability between the
+// relay miner and its dependencies (e.g. the relay servers and their respective
+// backend URLs).
+func (rel *relayMiner) ServePing(ctx context.Context, ln net.Listener) error {
+ // Start a long-lived goroutine that starts an HTTP server responding to
+ // ping requests. A single ping request on the relay server broadcasts a
+ // ping to all backing services/data nodes.
+ go func() {
+ if err := http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ rel.logger.Debug().Msg("pinging relay servers...")
+
+ if errs := rel.relayerProxy.PingAll(ctx); errs != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+ })); err != nil {
+ return
+ }
+ }()
+
+ return nil
+}
diff --git a/pkg/relayer/relayminer_test.go b/pkg/relayer/relayminer_test.go
index f7de39d39..930d4e11b 100644
--- a/pkg/relayer/relayminer_test.go
+++ b/pkg/relayer/relayminer_test.go
@@ -2,6 +2,9 @@ package relayer_test
import (
"context"
+ "net"
+ "net/http"
+ "os"
"testing"
"time"
@@ -57,3 +60,69 @@ func TestRelayMiner_StartAndStop(t *testing.T) {
err = relayminer.Stop(ctx)
require.NoError(t, err)
}
+
+func TestRelayMiner_Ping(t *testing.T) {
+ srObs, _ := channel.NewObservable[*servicetypes.Relay]()
+ servedRelaysObs := relayer.RelaysObservable(srObs)
+
+ mrObs, _ := channel.NewObservable[*relayer.MinedRelay]()
+ minedRelaysObs := relayer.MinedRelaysObservable(mrObs)
+
+ ctx := polyzero.NewLogger().WithContext(context.Background())
+ relayerProxyMock := testrelayer.NewMockOneTimeRelayerProxyWithPing(
+ ctx, t,
+ servedRelaysObs,
+ )
+
+ minerMock := testrelayer.NewMockOneTimeMiner(
+ ctx, t,
+ servedRelaysObs,
+ minedRelaysObs,
+ )
+
+ relayerSessionsManagerMock := testrelayer.NewMockOneTimeRelayerSessionsManager(
+ ctx, t,
+ minedRelaysObs,
+ )
+
+ deps := depinject.Supply(
+ relayerProxyMock,
+ minerMock,
+ relayerSessionsManagerMock,
+ )
+
+ relayminer, err := relayer.NewRelayMiner(ctx, deps)
+ require.NoError(t, err)
+ require.NotNil(t, relayminer)
+
+ err = relayminer.Start(ctx)
+ require.NoError(t, err)
+
+ time.Sleep(time.Millisecond)
+
+ filename := "/tmp/relayerminer.ping.sock"
+
+ ln, err := net.Listen("unix", filename)
+ require.NoError(t, err)
+ defer os.Remove(filename)
+
+ err = relayminer.ServePing(ctx, ln)
+ require.NoError(t, err)
+
+ time.Sleep(time.Millisecond)
+
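+	// Build an HTTP client that dials the unix socket listener directly instead of a TCP address.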
+ c := http.Client{
+ Transport: &http.Transport{
+ DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return net.Dial(ln.Addr().Network(), ln.Addr().String())
+ },
+ },
+ }
+ require.NoError(t, err)
+
+ _, err = c.Get("http://unix")
+ require.NoError(t, err)
+
+ err = relayminer.Stop(ctx)
+ require.NoError(t, err)
+}
diff --git a/testutil/testrelayer/proxy.go b/testutil/testrelayer/proxy.go
index a876ebec3..719bd2204 100644
--- a/testutil/testrelayer/proxy.go
+++ b/testutil/testrelayer/proxy.go
@@ -33,5 +33,36 @@ func NewMockOneTimeRelayerProxy(
ServedRelays().
Return(returnedRelaysObs).
Times(1)
+
+ return relayerProxyMock
+}
+
+// NewMockOneTimeRelayerProxyWithPing creates a new mock RelayerProxy. This mock
+// RelayerProxy will expect a call to ServedRelays with the given context, and
+// when that call is made, returnedRelaysObs is returned. It also expects one
+// call each to Start, Stop, and PingAll with the given context.
+func NewMockOneTimeRelayerProxyWithPing(
+ ctx context.Context,
+ t *testing.T,
+ returnedRelaysObs relayer.RelaysObservable,
+) *mockrelayer.MockRelayerProxy {
+ t.Helper()
+
+ ctrl := gomock.NewController(t)
+ relayerProxyMock := mockrelayer.NewMockRelayerProxy(ctrl)
+ relayerProxyMock.EXPECT().
+ Start(gomock.Eq(ctx)).
+ Times(1)
+ relayerProxyMock.EXPECT().
+ Stop(gomock.Eq(ctx)).
+ Times(1)
+ relayerProxyMock.EXPECT().
+ ServedRelays().
+ Return(returnedRelaysObs).
+ Times(1)
+ relayerProxyMock.EXPECT().
+ PingAll(gomock.Eq(ctx)).
+ Times(1)
+
return relayerProxyMock
}